//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: This is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//
20 #include "llvm/Analysis/InstructionSimplify.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/CaptureTracking.h"
26 #include "llvm/Analysis/ConstantFolding.h"
27 #include "llvm/Analysis/LoopAnalysisManager.h"
28 #include "llvm/Analysis/MemoryBuiltins.h"
29 #include "llvm/Analysis/OptimizationDiagnosticInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/Analysis/VectorUtils.h"
32 #include "llvm/IR/ConstantRange.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/GetElementPtrTypeIterator.h"
36 #include "llvm/IR/GlobalAlias.h"
37 #include "llvm/IR/Operator.h"
38 #include "llvm/IR/PatternMatch.h"
39 #include "llvm/IR/ValueHandle.h"
40 #include "llvm/Support/KnownBits.h"
using namespace llvm;
using namespace llvm::PatternMatch;
45 #define DEBUG_TYPE "instsimplify"
47 enum { RecursionLimit = 3 };
49 STATISTIC(NumExpand, "Number of expansions");
50 STATISTIC(NumReassoc, "Number of reassociations");
static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                              const SimplifyQuery &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
                               const SimplifyQuery &, unsigned);
/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) {
  return ConstantInt::getFalse(Ty);
}

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) {
  return ConstantInt::getTrue(Ty);
}
/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}
/// Does the given value dominate the specified phi node?
static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getParent()->getParent())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT) {
    if (!DT->isReachableFromEntry(P->getParent()))
      return true;
    if (!DT->isReachableFromEntry(I->getParent()))
      return false;
    return DT->dominates(I, P);
  }

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent() == &I->getParent()->getParent()->getEntryBlock() &&
      !isa<InvokeInst>(I))
    return true;

  return false;
}
123 /// Simplify "A op (B op' C)" by distributing op over op', turning it into
124 /// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
125 /// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
126 /// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
127 /// Returns the simplified value, or null if no simplification was performed.
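/// For example (one of the callers below, not an exhaustive list): with
/// Opcode == And and OpcodeToExpand == Or, "(A | B) & C" is considered as
/// "(A & C) | (B & C)", and the rewrite is returned only if both "A & C" and
/// "B & C" simplify and the recombined "or" itself simplifies or is simply
/// "A | B" again.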
128 static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
129 Instruction::BinaryOps OpcodeToExpand,
130 const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--) return nullptr;
135 // Check whether the expression has the form "(A op' B) op C".
136 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
137 if (Op0->getOpcode() == OpcodeToExpand) {
138 // It does! Try turning it into "(A op C) op' (B op C)".
139 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
140 // Do "A op C" and "B op C" both simplify?
141 if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
142 if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
143 // They do! Return "L op' R" if it simplifies or is already available.
144 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
145 if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand)
146 && L == B && R == A)) {
150 // Otherwise return "L op' R" if it simplifies.
151 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
158 // Check whether the expression has the form "A op (B op' C)".
159 if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
160 if (Op1->getOpcode() == OpcodeToExpand) {
161 // It does! Try turning it into "(A op B) op' (A op C)".
162 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
163 // Do "A op B" and "A op C" both simplify?
164 if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
165 if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
166 // They do! Return "L op' R" if it simplifies or is already available.
167 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
168 if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand)
169 && L == C && R == B)) {
173 // Otherwise return "L op' R" if it simplifies.
174 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
184 /// Generic simplifications for associative binary operations.
185 /// Returns the simpler value, or null if none was found.
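/// For example, "(X & Y) & X" can be simplified to "X & Y": using the
/// commuted transform "(A op B) op C ==> (C op A) op B", "C op A" is
/// "X & X", which folds to X, so the result is the already existing value
/// "X & Y".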
186 static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
187 Value *LHS, Value *RHS,
188 const SimplifyQuery &Q,
189 unsigned MaxRecurse) {
190 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--) return nullptr;
196 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
197 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
199 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
200 if (Op0 && Op0->getOpcode() == Opcode) {
201 Value *A = Op0->getOperand(0);
202 Value *B = Op0->getOperand(1);
205 // Does "B op C" simplify?
206 if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
207 // It does! Return "A op V" if it simplifies or is already available.
208 // If V equals B then "A op V" is just the LHS.
209 if (V == B) return LHS;
210 // Otherwise return "A op V" if it simplifies.
211 if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
218 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
219 if (Op1 && Op1->getOpcode() == Opcode) {
221 Value *B = Op1->getOperand(0);
222 Value *C = Op1->getOperand(1);
224 // Does "A op B" simplify?
225 if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
226 // It does! Return "V op C" if it simplifies or is already available.
227 // If V equals B then "V op C" is just the RHS.
228 if (V == B) return RHS;
229 // Otherwise return "V op C" if it simplifies.
230 if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
237 // The remaining transforms require commutativity as well as associativity.
238 if (!Instruction::isCommutative(Opcode))
241 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
242 if (Op0 && Op0->getOpcode() == Opcode) {
243 Value *A = Op0->getOperand(0);
244 Value *B = Op0->getOperand(1);
247 // Does "C op A" simplify?
248 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
249 // It does! Return "V op B" if it simplifies or is already available.
250 // If V equals A then "V op B" is just the LHS.
251 if (V == A) return LHS;
252 // Otherwise return "V op B" if it simplifies.
253 if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
260 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
261 if (Op1 && Op1->getOpcode() == Opcode) {
263 Value *B = Op1->getOperand(0);
264 Value *C = Op1->getOperand(1);
266 // Does "C op A" simplify?
267 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
268 // It does! Return "B op V" if it simplifies or is already available.
269 // If V equals C then "B op V" is just the RHS.
270 if (V == C) return RHS;
271 // Otherwise return "B op V" if it simplifies.
272 if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
282 /// In the case of a binary operation with a select instruction as an operand,
283 /// try to simplify the binop by seeing whether evaluating it on both branches
284 /// of the select results in the same value. Returns the common value if so,
285 /// otherwise returns null.
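/// For example, "and (select %c, %x, 0), %x" yields %x on the true arm and 0
/// on the false arm; those are exactly the select's own operands, so the
/// binop folds to the select itself.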
286 static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
287 Value *RHS, const SimplifyQuery &Q,
288 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--) return nullptr;
  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }
  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }
  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && isa<UndefValue>(TV))
    return FV;
  if (FV && isa<UndefValue>(FV))
    return TV;
323 // If applying the operation did not change the true and false select values,
324 // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;
328 // If one branch simplified and the other did not, and the simplified
329 // value is equal to the unsimplified one, return the simplified value.
330 // For example, select (cond, X, X & Z) & Z -> X & Z.
331 if ((FV && !TV) || (TV && !FV)) {
332 // Check that the simplified value has the form "X op Y" where "op" is the
333 // same as the original operation.
334 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
335 if (Simplified && Simplified->getOpcode() == Opcode) {
336 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
337 // We already know that "op" is the same as for the simplified value. See
338 // if the operands match too. If so, return the simplified value.
339 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
340 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }
  return nullptr;
}
355 /// In the case of a comparison with a select instruction, try to simplify the
356 /// comparison by seeing whether both branches of the select result in the same
357 /// value. Returns the common value if so, otherwise returns null.
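/// For example, "icmp ult (select %c, i32 1, i32 2), i32 4" compares as true
/// on both arms of the select, so the common value true is returned.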
358 static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
359 Value *RHS, const SimplifyQuery &Q,
360 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--) return nullptr;
365 // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
371 SelectInst *SI = cast<SelectInst>(LHS);
372 Value *Cond = SI->getCondition();
373 Value *TV = SI->getTrueValue();
374 Value *FV = SI->getFalseValue();
376 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
377 // Does "cmp TV, RHS" simplify?
378 Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
380 // It not only simplified, it simplified to the select condition. Replace
382 TCmp = getTrue(Cond->getType());
384 // It didn't simplify. However if "cmp TV, RHS" is equal to the select
385 // condition then we can replace it with 'true'. Otherwise give up.
386 if (!isSameCompare(Cond, Pred, TV, RHS))
388 TCmp = getTrue(Cond->getType());
391 // Does "cmp FV, RHS" simplify?
392 Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse);
394 // It not only simplified, it simplified to the select condition. Replace
396 FCmp = getFalse(Cond->getType());
398 // It didn't simplify. However if "cmp FV, RHS" is equal to the select
399 // condition then we can replace it with 'false'. Otherwise give up.
400 if (!isSameCompare(Cond, Pred, FV, RHS))
402 FCmp = getFalse(Cond->getType());
  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;
410 // The remaining cases only make sense if the select condition has the same
411 // type as the result of the comparison, so bail out if this is not so.
412 if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
414 // If the false value simplified to false, then the result of the compare
415 // is equal to "Cond && TCmp". This also catches the case when the false
416 // value simplified to false and the true value to true, returning "Cond".
417 if (match(FCmp, m_Zero()))
418 if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
420 // If the true value simplified to true, then the result of the compare
421 // is equal to "Cond || FCmp".
422 if (match(TCmp, m_One()))
423 if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
425 // Finally, if the false value simplified to true and the true value to
426 // false, then the result of the compare is equal to "!Cond".
427 if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
429 SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
436 /// In the case of a binary operation with an operand that is a PHI instruction,
437 /// try to simplify the binop by seeing whether evaluating it on the incoming
438 /// phi values yields the same result for every value. If so returns the common
439 /// value, otherwise returns null.
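/// For example, if %phi has incoming values %x and "%x | %y", then
/// "and %phi, %x" simplifies to %x for every incoming value (via "X & X -> X"
/// and the "(A | B) & A -> A" absorption fold), so the whole binop folds to
/// %x, provided %x dominates the phi.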
440 static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
441 Value *RHS, const SimplifyQuery &Q,
442 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--) return nullptr;
  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!ValueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!ValueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }
461 // Evaluate the BinOp on the incoming phi values.
462 Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = PI == LHS ?
      SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
      SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
479 /// In the case of a comparison with a PHI instruction, try to simplify the
480 /// comparison by seeing whether comparing with all of the incoming phi values
481 /// yields the same result every time. If so returns the common result,
482 /// otherwise returns null.
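/// For example, if every incoming value of %phi compares the same way against
/// RHS (say, each comparison folds to true), that common result is returned
/// for the whole compare.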
483 static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
484 const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--) return nullptr;
489 // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
495 PHINode *PI = cast<PHINode>(LHS);
497 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
498 if (!ValueDominatesPHI(RHS, PI, Q.DT))
501 // Evaluate the BinOp on the incoming phi values.
502 Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
517 static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
518 Value *&Op0, Value *&Op1,
519 const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}
531 /// Given operands for an Add, see if we can fold the result.
532 /// If not, this returns null.
533 static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
534 const SimplifyQuery &Q, unsigned MaxRecurse) {
535 if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
  // X + undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;
  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;
  // X + (Y - X) -> Y and (Y - X) + X -> Y; e.g. X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;
  // X + ~X -> -1 since ~X = -X-1
555 Type *Ty = Op0->getType();
556 if (match(Op0, m_Not(m_Specific(Op1))) ||
557 match(Op1, m_Not(m_Specific(Op0))))
558 return Constant::getAllOnesValue(Ty);
560 // add nsw/nuw (xor Y, signmask), signmask --> Y
561 // The no-wrapping add guarantees that the top bit will be set by the add.
562 // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((isNSW || isNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;
  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
                                          MaxRecurse))
    return V;
577 // Threading Add over selects and phi nodes is pointless, so don't bother.
578 // Threading over the select in "A + select(cond, B, C)" means evaluating
579 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
580 // only if B and C are equal. If B and C are equal then (since we assume
581 // that operands have already been simplified) "select(cond, B, C)" should
582 // have been simplified to the common value of B and C already. Analysing
583 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
584 // for threading over phi nodes.
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Query) {
  return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query, RecursionLimit);
}
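
// Example usage (a hypothetical caller, not part of this file): a pass that
// has built a SimplifyQuery SQ from its available analyses could fold an add
// instruction I with
//
//   if (Value *V = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
//                                  I->hasNoSignedWrap(), I->hasNoUnsignedWrap(),
//                                  SQ))
//     I->replaceAllUsesWith(V);
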
594 /// \brief Compute the base pointer and cumulative constant offsets for V.
596 /// This strips all constant offsets off of V, leaving it the base pointer, and
597 /// accumulates the total constant offset applied in the returned constant. It
598 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
599 /// no constant offsets applied.
601 /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
602 /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
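///
/// For example (assuming a 64-bit pointer size), for
///   %p = getelementptr inbounds i32, i32* %base, i64 3
/// this strips V down to %base and returns the i64 constant 12 (three
/// elements of 4 bytes each).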
604 static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
605 bool AllowNonInbounds = false) {
606 assert(V->getType()->getScalarType()->isPointerTy());
608 Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
609 APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
611 // Even though we don't look through PHI nodes, we could be called on an
612 // instruction in an unreachable block, which may be on a cycle.
613 SmallPtrSet<Value *, 4> Visited;
616 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
617 if ((!AllowNonInbounds && !GEP->isInBounds()) ||
618 !GEP->accumulateConstantOffset(DL, Offset))
620 V = GEP->getPointerOperand();
621 } else if (Operator::getOpcode(V) == Instruction::BitCast) {
622 V = cast<Operator>(V)->getOperand(0);
623 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
624 if (GA->isInterposable())
626 V = GA->getAliasee();
628 if (auto CS = CallSite(V))
629 if (Value *RV = CS.getReturnedArgOperand()) {
635 assert(V->getType()->getScalarType()->isPointerTy() &&
636 "Unexpected operand type!");
637 } while (Visited.insert(V).second);
639 Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
640 if (V->getType()->isVectorTy())
641 return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
646 /// \brief Compute the constant difference between two pointer values.
647 /// If the difference is not a constant, returns zero.
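/// For example, the difference between "getelementptr inbounds i8, i8* %p, i64 7"
/// and "getelementptr inbounds i8, i8* %p, i64 3" is the constant 4, because
/// both strip down to the same base %p with offsets 7 and 3.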
648 static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
650 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
651 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
653 // If LHS and RHS are not related via constant offsets to the same base
654 // value, there is nothing we can do here.
658 // Otherwise, the difference of LHS - RHS can be computed as:
660 // = (LHSOffset + Base) - (RHSOffset + Base)
661 // = LHSOffset - RHSOffset
662 return ConstantExpr::getSub(LHSOffset, RHSOffset);
665 /// Given operands for a Sub, see if we can fold the result.
666 /// If not, this returns null.
667 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
668 const SimplifyQuery &Q, unsigned MaxRecurse) {
669 if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
672 // X - undef -> undef
673 // undef - X -> undef
674 if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
675 return UndefValue::get(Op0->getType());
678 if (match(Op1, m_Zero()))
683 return Constant::getNullValue(Op0->getType());
685 // Is this a negation?
686 if (match(Op0, m_Zero())) {
687 // 0 - X -> 0 if the sub is NUW.
691 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
692 if (Known.Zero.isMaxSignedValue()) {
693 // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
694 // Op1 must be 0 because negating the minimum signed value is undefined.
698 // 0 - X -> X if X is 0 or the minimum signed value.
703 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
704 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
705 Value *X = nullptr, *Y = nullptr, *Z = Op1;
706 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
707 // See if "V === Y - Z" simplifies.
708 if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
709 // It does! Now see if "X + V" simplifies.
710 if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
711 // It does, we successfully reassociated!
715 // See if "V === X - Z" simplifies.
716 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
717 // It does! Now see if "Y + V" simplifies.
718 if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
719 // It does, we successfully reassociated!
725 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
726 // For example, X - (X + 1) -> -1
728 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
729 // See if "V === X - Y" simplifies.
730 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
731 // It does! Now see if "V - Z" simplifies.
732 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
733 // It does, we successfully reassociated!
737 // See if "V === X - Z" simplifies.
738 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
739 // It does! Now see if "V - Y" simplifies.
740 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
741 // It does, we successfully reassociated!
747 // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
748 // For example, X - (X - Y) -> Y.
750 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
751 // See if "V === Z - X" simplifies.
752 if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
753 // It does! Now see if "V + Y" simplifies.
754 if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
755 // It does, we successfully reassociated!
760 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
761 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
762 match(Op1, m_Trunc(m_Value(Y))))
763 if (X->getType() == Y->getType())
764 // See if "V === X - Y" simplifies.
765 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
766 // It does! Now see if "trunc V" simplifies.
767 if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
769 // It does, return the simplified "trunc V".
772 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
773 if (match(Op0, m_PtrToInt(m_Value(X))) &&
774 match(Op1, m_PtrToInt(m_Value(Y))))
775 if (Constant *Result = computePointerDifference(Q.DL, X, Y))
776 return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
779 if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
780 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
783 // Threading Sub over selects and phi nodes is pointless, so don't bother.
784 // Threading over the select in "A - select(cond, B, C)" means evaluating
785 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
786 // only if B and C are equal. If B and C are equal then (since we assume
787 // that operands have already been simplified) "select(cond, B, C)" should
788 // have been simplified to the common value of B and C already. Analysing
789 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
790 // for threading over phi nodes.
795 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
796 const SimplifyQuery &Q) {
797 return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
802 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
803 const SimplifyQuery &Q, unsigned MaxRecurse) {
804 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
808 if (match(Op1, m_NegZero()))
811 // fadd X, 0 ==> X, when we know X is not -0
812 if (match(Op1, m_Zero()) &&
813 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
816 // fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
  // where nnan and ninf have to occur at least once somewhere in this
  // expression.
819 Value *SubOp = nullptr;
820 if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
822 else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
825 Instruction *FSub = cast<Instruction>(SubOp);
826 if ((FMF.noNaNs() || FSub->hasNoNaNs()) &&
827 (FMF.noInfs() || FSub->hasNoInfs()))
828 return Constant::getNullValue(Op0->getType());
/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
836 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
837 const SimplifyQuery &Q, unsigned MaxRecurse) {
838 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
842 if (match(Op1, m_Zero()))
845 // fsub X, -0 ==> X, when we know X is not -0
846 if (match(Op1, m_NegZero()) &&
847 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
850 // fsub -0.0, (fsub -0.0, X) ==> X
852 if (match(Op0, m_NegZero()) && match(Op1, m_FSub(m_NegZero(), m_Value(X))))
855 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
856 if (FMF.noSignedZeros() && match(Op0, m_AnyZero()) &&
857 match(Op1, m_FSub(m_AnyZero(), m_Value(X))))
860 // fsub nnan x, x ==> 0.0
861 if (FMF.noNaNs() && Op0 == Op1)
862 return Constant::getNullValue(Op0->getType());
867 /// Given the operands for an FMul, see if we can fold the result
868 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
869 const SimplifyQuery &Q, unsigned MaxRecurse) {
870 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
874 if (match(Op1, m_FPOne()))
877 // fmul nnan nsz X, 0 ==> 0
878 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
884 /// Given operands for a Mul, see if we can fold the result.
885 /// If not, this returns null.
886 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
887 unsigned MaxRecurse) {
888 if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
892 if (match(Op1, m_Undef()))
893 return Constant::getNullValue(Op0->getType());
896 if (match(Op1, m_Zero()))
900 if (match(Op1, m_One()))
903 // (X / Y) * Y -> X if the division is exact.
905 if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
906 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
910 if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
911 if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
914 // Try some generic simplifications for associative operations.
915 if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
919 // Mul distributes over Add. Try some generic simplifications based on this.
920 if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
924 // If the operation is with the result of a select instruction, check whether
925 // operating on either branch of the select always yields the same value.
926 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
927 if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
931 // If the operation is with the result of a phi instruction, check whether
932 // operating on all incoming values of the phi always yields the same value.
933 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
934 if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
941 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
942 const SimplifyQuery &Q) {
943 return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit);
947 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
948 const SimplifyQuery &Q) {
949 return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit);
952 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
953 const SimplifyQuery &Q) {
954 return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit);
957 Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
958 return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
961 /// Check for common or similar folds of integer division or integer remainder.
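/// For example, "udiv %x, 0" and "urem %x, 0" are both folded to undef (the
/// divide-by-zero fault need not be preserved), and "sdiv undef, %x" folds
/// to 0.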
962 static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
963 Type *Ty = Op0->getType();
965 // X / undef -> undef
966 // X % undef -> undef
967 if (match(Op1, m_Undef()))
972 // We don't need to preserve faults!
973 if (match(Op1, m_Zero()))
974 return UndefValue::get(Ty);
976 // If any element of a constant divisor vector is zero, the whole op is undef.
977 auto *Op1C = dyn_cast<Constant>(Op1);
978 if (Op1C && Ty->isVectorTy()) {
979 unsigned NumElts = Ty->getVectorNumElements();
980 for (unsigned i = 0; i != NumElts; ++i) {
981 Constant *Elt = Op1C->getAggregateElement(i);
982 if (Elt && Elt->isNullValue())
983 return UndefValue::get(Ty);
989 if (match(Op0, m_Undef()))
990 return Constant::getNullValue(Ty);
994 if (match(Op0, m_Zero()))
1000 return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
1004 // If this is a boolean op (single-bit element type), we can't have
1005 // division-by-zero or remainder-by-zero, so assume the divisor is 1.
  if (match(Op1, m_One()) || Ty->getScalarType()->isIntegerTy(1))
    return IsDiv ? Op0 : Constant::getNullValue(Ty);
  return nullptr;
}
1012 /// Given operands for an SDiv or UDiv, see if we can fold the result.
1013 /// If not, this returns null.
1014 static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1015 const SimplifyQuery &Q, unsigned MaxRecurse) {
1016 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1019 if (Value *V = simplifyDivRem(Op0, Op1, true))
1022 bool isSigned = Opcode == Instruction::SDiv;
1024 // (X * Y) / Y -> X if the multiplication does not overflow.
1025 Value *X = nullptr, *Y = nullptr;
1026 if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
1027 if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
1028 OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
1029 // If the Mul knows it does not overflow, then we are good to go.
1030 if ((isSigned && Mul->hasNoSignedWrap()) ||
1031 (!isSigned && Mul->hasNoUnsignedWrap()))
1033 // If X has the form X = A / Y then X * Y cannot overflow.
1034 if (BinaryOperator *Div = dyn_cast<BinaryOperator>(X))
1035 if (Div->getOpcode() == Opcode && Div->getOperand(1) == Y)
1039 // (X rem Y) / Y -> 0
1040 if ((isSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1041 (!isSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1042 return Constant::getNullValue(Op0->getType());
1044 // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
1045 ConstantInt *C1, *C2;
1046 if (!isSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
1047 match(Op1, m_ConstantInt(C2))) {
1049 (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
1051 return Constant::getNullValue(Op0->getType());
1054 // If the operation is with the result of a select instruction, check whether
1055 // operating on either branch of the select always yields the same value.
1056 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1057 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1060 // If the operation is with the result of a phi instruction, check whether
1061 // operating on all incoming values of the phi always yields the same value.
1062 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1063 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1069 /// Given operands for an SDiv, see if we can fold the result.
1070 /// If not, this returns null.
1071 static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1072 unsigned MaxRecurse) {
1073 if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
1079 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1080 return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
1083 /// Given operands for a UDiv, see if we can fold the result.
1084 /// If not, this returns null.
1085 static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1086 unsigned MaxRecurse) {
1087 if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
  // udiv %V, C -> 0 if %V < C
  if (MaxRecurse) {
    if (Constant *C = dyn_cast_or_null<Constant>(SimplifyICmpInst(
            ICmpInst::ICMP_ULT, Op0, Op1, Q, MaxRecurse - 1))) {
      if (C->isAllOnesValue())
        return Constant::getNullValue(Op0->getType());
    }
  }
1103 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1104 return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
1107 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1108 const SimplifyQuery &Q, unsigned) {
1109 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
1112 // undef / X -> undef (the undef could be a snan).
1113 if (match(Op0, m_Undef()))
1116 // X / undef -> undef
1117 if (match(Op1, m_Undef()))
1121 if (match(Op1, m_FPOne()))
1125 // Requires that NaNs are off (X could be zero) and signed zeroes are
1126 // ignored (X could be positive or negative, so the output sign is unknown).
1127 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
1131 // X / X -> 1.0 is legal when NaNs are ignored.
1133 return ConstantFP::get(Op0->getType(), 1.0);
1135 // -X / X -> -1.0 and
1136 // X / -X -> -1.0 are legal when NaNs are ignored.
1137 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
1138 if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
1139 BinaryOperator::getFNegArgument(Op0) == Op1) ||
1140 (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
1141 BinaryOperator::getFNegArgument(Op1) == Op0))
1142 return ConstantFP::get(Op0->getType(), -1.0);
1148 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1149 const SimplifyQuery &Q) {
1150 return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit);
1153 /// Given operands for an SRem or URem, see if we can fold the result.
1154 /// If not, this returns null.
1155 static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1156 const SimplifyQuery &Q, unsigned MaxRecurse) {
1157 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1160 if (Value *V = simplifyDivRem(Op0, Op1, false))
1163 // (X % Y) % Y -> X % Y
1164 if ((Opcode == Instruction::SRem &&
1165 match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1166 (Opcode == Instruction::URem &&
1167 match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1170 // If the operation is with the result of a select instruction, check whether
1171 // operating on either branch of the select always yields the same value.
1172 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1173 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1176 // If the operation is with the result of a phi instruction, check whether
1177 // operating on all incoming values of the phi always yields the same value.
1178 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1179 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1185 /// Given operands for an SRem, see if we can fold the result.
1186 /// If not, this returns null.
1187 static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1188 unsigned MaxRecurse) {
1189 if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
1195 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1196 return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
1199 /// Given operands for a URem, see if we can fold the result.
1200 /// If not, this returns null.
1201 static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1202 unsigned MaxRecurse) {
1203 if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
  // urem %V, C -> %V if %V < C
  if (MaxRecurse) {
    if (Constant *C = dyn_cast_or_null<Constant>(SimplifyICmpInst(
            ICmpInst::ICMP_ULT, Op0, Op1, Q, MaxRecurse - 1))) {
      if (C->isAllOnesValue())
        return Op0;
    }
  }
1219 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1220 return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
1223 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1224 const SimplifyQuery &Q, unsigned) {
1225 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
1228 // undef % X -> undef (the undef could be a snan).
1229 if (match(Op0, m_Undef()))
1232 // X % undef -> undef
1233 if (match(Op1, m_Undef()))
1237 // Requires that NaNs are off (X could be zero) and signed zeroes are
1238 // ignored (X could be positive or negative, so the output sign is unknown).
1239 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
1245 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1246 const SimplifyQuery &Q) {
1247 return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit);
1250 /// Returns true if a shift by \c Amount always yields undef.
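/// For example, a shift of an i32 by 32 (or more), or by undef, is undefined
/// behavior, so "shl i32 %x, 32" can be folded to undef by the callers below.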
1251 static bool isUndefShift(Value *Amount) {
1252 Constant *C = dyn_cast<Constant>(Amount);
1256 // X shift by undef -> undef because it may shift by the bitwidth.
1257 if (isa<UndefValue>(C))
1260 // Shifting by the bitwidth or more is undefined.
1261 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1262 if (CI->getValue().getLimitedValue() >=
1263 CI->getType()->getScalarSizeInBits())
1266 // If all lanes of a vector shift are undefined the whole shift is.
1267 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1268 for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
1269 if (!isUndefShift(C->getAggregateElement(I)))
1277 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1278 /// If not, this returns null.
1279 static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1280 Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse) {
1281 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1284 // 0 shift by X -> 0
1285 if (match(Op0, m_Zero()))
1288 // X shift by 0 -> X
1289 if (match(Op1, m_Zero()))
1292 // Fold undefined shifts.
1293 if (isUndefShift(Op1))
1294 return UndefValue::get(Op0->getType());
1296 // If the operation is with the result of a select instruction, check whether
1297 // operating on either branch of the select always yields the same value.
1298 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1299 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1302 // If the operation is with the result of a phi instruction, check whether
1303 // operating on all incoming values of the phi always yields the same value.
1304 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1305 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1308 // If any bits in the shift amount make that value greater than or equal to
1309 // the number of bits in the type, the shift is undefined.
1310 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1311 if (Known.One.getLimitedValue() >= Known.getBitWidth())
1312 return UndefValue::get(Op0->getType());
  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
  if (Known.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;
1323 /// \brief Given operands for an Shl, LShr or AShr, see if we can
1324 /// fold the result. If not, this returns null.
1325 static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1326 Value *Op1, bool isExact, const SimplifyQuery &Q,
1327 unsigned MaxRecurse) {
1328 if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
1333 return Constant::getNullValue(Op0->getType());
1336 // undef >> X -> undef (if it's exact)
1337 if (match(Op0, m_Undef()))
1338 return isExact ? Op0 : Constant::getNullValue(Op0->getType());
1340 // The low bit cannot be shifted out of an exact shift if it is set.
1342 KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
1343 if (Op0Known.One[0])
1350 /// Given operands for an Shl, see if we can fold the result.
1351 /// If not, this returns null.
1352 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1353 const SimplifyQuery &Q, unsigned MaxRecurse) {
1354 if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
  // undef << X -> undef (if it's NSW/NUW)
1359 if (match(Op0, m_Undef()))
1360 return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
1362 // (X >> A) << A -> X
1364 if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1369 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1370 const SimplifyQuery &Q) {
1371 return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
1374 /// Given operands for an LShr, see if we can fold the result.
1375 /// If not, this returns null.
1376 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1377 const SimplifyQuery &Q, unsigned MaxRecurse) {
1378 if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
1382 // (X << A) >> A -> X
1384 if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1390 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1391 const SimplifyQuery &Q) {
1392 return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1395 /// Given operands for an AShr, see if we can fold the result.
1396 /// If not, this returns null.
1397 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1398 const SimplifyQuery &Q, unsigned MaxRecurse) {
1399 if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
1403 // all ones >>a X -> all ones
1404 if (match(Op0, m_AllOnes()))
1407 // (X << A) >> A -> X
1409 if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1412 // Arithmetic shifting an all-sign-bit value is a no-op.
1413 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1414 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1420 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1421 const SimplifyQuery &Q) {
  return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}
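
/// Fold a combination of an unsigned compare of X against Y with an equality
/// compare of Y against zero; e.g. "X < Y && Y != 0 --> X < Y" and
/// "X >= Y || Y != 0 --> true". The concrete cases handled are spelled out in
/// the comments below.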
1425 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1426 ICmpInst *UnsignedICmp, bool IsAnd) {
1429 ICmpInst::Predicate EqPred;
1430 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1431 !ICmpInst::isEquality(EqPred))
1434 ICmpInst::Predicate UnsignedPred;
1435 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1436 ICmpInst::isUnsigned(UnsignedPred))
1438 else if (match(UnsignedICmp,
1439 m_ICmp(UnsignedPred, m_Value(Y), m_Specific(X))) &&
1440 ICmpInst::isUnsigned(UnsignedPred))
1441 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1445 // X < Y && Y != 0 --> X < Y
1446 // X < Y || Y != 0 --> Y != 0
1447 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1448 return IsAnd ? UnsignedICmp : ZeroICmp;
1450 // X >= Y || Y != 0 --> true
1451 // X >= Y || Y == 0 --> X >= Y
1452 if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) {
1453 if (EqPred == ICmpInst::ICMP_NE)
1454 return getTrue(UnsignedICmp->getType());
1455 return UnsignedICmp;
1458 // X < Y && Y == 0 --> false
1459 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1461 return getFalse(UnsignedICmp->getType());
1466 /// Commuted variants are assumed to be handled by calling this function again
1467 /// with the parameters swapped.
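///
/// For example, "(icmp sgt A, B) & (icmp sge A, B)" folds to "icmp sgt A, B"
/// because the first compare implies the second, while
/// "(icmp slt A, B) & (icmp sgt A, B)" folds to false because the predicates
/// are disjoint.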
1468 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1469 ICmpInst::Predicate Pred0, Pred1;
1471 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1472 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1475 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1476 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1477 // can eliminate Op1 from this 'and'.
  if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
    return Op0;
1481 // Check for any combination of predicates that are guaranteed to be disjoint.
1482 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1483 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1484 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1485 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1486 return getFalse(Op0->getType());
1491 /// Commuted variants are assumed to be handled by calling this function again
1492 /// with the parameters swapped.
1493 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1494 ICmpInst::Predicate Pred0, Pred1;
1496 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1497 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1500 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
1501 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1502 // can eliminate Op0 from this 'or'.
  if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
    return Op1;
  // Check for any combination of predicates that cover the entire range of
  // possibilities.
1508 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1509 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
1510 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
1511 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
1512 return getTrue(Op0->getType());
1517 /// Test if a pair of compares with a shared operand and 2 constants has an
/// empty set intersection, full set union, or if one compare is a superset of
/// the other.
1520 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1522 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1523 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1526 const APInt *C0, *C1;
1527 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1528 !match(Cmp1->getOperand(1), m_APInt(C1)))
1531 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1532 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1534 // For and-of-compares, check if the intersection is empty:
1535 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1536 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1537 return getFalse(Cmp0->getType());
1539 // For or-of-compares, check if the union is full:
1540 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1541 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1542 return getTrue(Cmp0->getType());
1544 // Is one range a superset of the other?
1545 // If this is and-of-compares, take the smaller set:
1546 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1547 // If this is or-of-compares, take the larger set:
1548 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1549 if (Range0.contains(Range1))
1550 return IsAnd ? Cmp1 : Cmp0;
1551 if (Range1.contains(Range0))
1552 return IsAnd ? Cmp0 : Cmp1;
1557 /// Commuted variants are assumed to be handled by calling this function again
1558 /// with the parameters swapped.
1559 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
1560 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
1563 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
1566 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1569 // (icmp (add V, C0), C1) & (icmp V, C0)
1570 Type *ITy = Op0->getType();
1571 ICmpInst::Predicate Pred0, Pred1;
1572 const APInt *C0, *C1;
1574 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1577 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1580 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1581 if (AddInst->getOperand(1) != Op1->getOperand(1))
1584 bool isNSW = AddInst->hasNoSignedWrap();
1585 bool isNUW = AddInst->hasNoUnsignedWrap();
1587 const APInt Delta = *C1 - *C0;
1588 if (C0->isStrictlyPositive()) {
1590 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1591 return getFalse(ITy);
1592 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1593 return getFalse(ITy);
1596 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1597 return getFalse(ITy);
1598 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1599 return getFalse(ITy);
1602 if (C0->getBoolValue() && isNUW) {
1604 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1605 return getFalse(ITy);
1607 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1608 return getFalse(ITy);
1614 /// Commuted variants are assumed to be handled by calling this function again
1615 /// with the parameters swapped.
1616 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
1617 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
1620 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
1623 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1626 // (icmp (add V, C0), C1) | (icmp V, C0)
1627 ICmpInst::Predicate Pred0, Pred1;
1628 const APInt *C0, *C1;
1630 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1633 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1636 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1637 if (AddInst->getOperand(1) != Op1->getOperand(1))
1640 Type *ITy = Op0->getType();
1641 bool isNSW = AddInst->hasNoSignedWrap();
1642 bool isNUW = AddInst->hasNoUnsignedWrap();
1644 const APInt Delta = *C1 - *C0;
1645 if (C0->isStrictlyPositive()) {
1647 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1648 return getTrue(ITy);
1649 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1650 return getTrue(ITy);
1653 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1654 return getTrue(ITy);
1655 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1656 return getTrue(ITy);
1659 if (C0->getBoolValue() && isNUW) {
1661 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1662 return getTrue(ITy);
1664 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1665 return getTrue(ITy);
1671 static Value *simplifyPossiblyCastedAndOrOfICmps(ICmpInst *Cmp0, ICmpInst *Cmp1,
1672 bool IsAnd, CastInst *Cast) {
1674 IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1);
1680 // If we looked through casts, we can only handle a constant simplification
1681 // because we are not allowed to create a cast instruction here.
1682 if (auto *C = dyn_cast<Constant>(V))
1683 return ConstantExpr::getCast(Cast->getOpcode(), C, Cast->getType());
1688 static Value *simplifyAndOrOfICmps(Value *Op0, Value *Op1, bool IsAnd) {
1689 // Look through casts of the 'and' operands to find compares.
1690 auto *Cast0 = dyn_cast<CastInst>(Op0);
1691 auto *Cast1 = dyn_cast<CastInst>(Op1);
1692 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1693 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1694 Op0 = Cast0->getOperand(0);
1695 Op1 = Cast1->getOperand(0);
1698 auto *Cmp0 = dyn_cast<ICmpInst>(Op0);
1699 auto *Cmp1 = dyn_cast<ICmpInst>(Op1);
1703 if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp0, Cmp1, IsAnd, Cast0))
1705 if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp1, Cmp0, IsAnd, Cast0))
1711 /// Given operands for an And, see if we can fold the result.
1712 /// If not, this returns null.
1713 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1714 unsigned MaxRecurse) {
1715 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
1719 if (match(Op1, m_Undef()))
1720 return Constant::getNullValue(Op0->getType());
1727 if (match(Op1, m_Zero()))
1731 if (match(Op1, m_AllOnes()))
1734 // A & ~A = ~A & A = 0
1735 if (match(Op0, m_Not(m_Specific(Op1))) ||
1736 match(Op1, m_Not(m_Specific(Op0))))
1737 return Constant::getNullValue(Op0->getType());
1740 Value *A = nullptr, *B = nullptr;
1741 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
1742 (A == Op1 || B == Op1))
1746 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
1747 (A == Op0 || B == Op0))
1750 // A mask that only clears known zeros of a shifted value is a no-op.
1754 if (match(Op1, m_APInt(Mask))) {
1755 // If all bits in the inverted and shifted mask are clear:
1756 // and (shl X, ShAmt), Mask --> shl X, ShAmt
1757 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
1758 (~(*Mask)).lshr(*ShAmt).isNullValue())
1761 // If all bits in the inverted and shifted mask are clear:
1762 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
1763 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
1764 (~(*Mask)).shl(*ShAmt).isNullValue())
1768 // A & (-A) = A if A is a power of two or zero.
1769 if (match(Op0, m_Neg(m_Specific(Op1))) ||
1770 match(Op1, m_Neg(m_Specific(Op0)))) {
1771 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1774 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1779 if (Value *V = simplifyAndOrOfICmps(Op0, Op1, true))
1782 // Try some generic simplifications for associative operations.
1783 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
1787 // And distributes over Or. Try some generic simplifications based on this.
1788 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
1792 // And distributes over Xor. Try some generic simplifications based on this.
1793 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
1797 // If the operation is with the result of a select instruction, check whether
1798 // operating on either branch of the select always yields the same value.
1799 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1800 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
1804 // If the operation is with the result of a phi instruction, check whether
1805 // operating on all incoming values of the phi always yields the same value.
1806 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1807 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
1814 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1815 return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
1818 /// Given operands for an Or, see if we can fold the result.
1819 /// If not, this returns null.
1820 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1821 unsigned MaxRecurse) {
1822 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
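// X | undef -> -1: the undef operand may be assumed to be all ones here.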
1826 if (match(Op1, m_Undef()))
1827 return Constant::getAllOnesValue(Op0->getType());
1834 if (match(Op1, m_Zero()))
1838 if (match(Op1, m_AllOnes()))
1841 // A | ~A = ~A | A = -1
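// E.g., 'or i32 %x, (xor i32 %x, -1)' folds to -1 (all bits set).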
1842 if (match(Op0, m_Not(m_Specific(Op1))) ||
1843 match(Op1, m_Not(m_Specific(Op0))))
1844 return Constant::getAllOnesValue(Op0->getType());
1847 Value *A = nullptr, *B = nullptr;
1848 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1849 (A == Op1 || B == Op1))
1853 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
1854 (A == Op0 || B == Op0))
1857 // ~(A & ?) | A = -1
1858 if (match(Op0, m_Not(m_And(m_Value(A), m_Value(B)))) &&
1859 (A == Op1 || B == Op1))
1860 return Constant::getAllOnesValue(Op1->getType());
1862 // A | ~(A & ?) = -1
1863 if (match(Op1, m_Not(m_And(m_Value(A), m_Value(B)))) &&
1864 (A == Op0 || B == Op0))
1865 return Constant::getAllOnesValue(Op0->getType());
1867 // (A & ~B) | (A ^ B) -> (A ^ B)
1868 // (~B & A) | (A ^ B) -> (A ^ B)
1869 // (A & ~B) | (B ^ A) -> (B ^ A)
1870 // (~B & A) | (B ^ A) -> (B ^ A)
1871 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
1872 (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
1873 match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
1876 // Commute the 'or' operands.
1877 // (A ^ B) | (A & ~B) -> (A ^ B)
1878 // (A ^ B) | (~B & A) -> (A ^ B)
1879 // (B ^ A) | (A & ~B) -> (B ^ A)
1880 // (B ^ A) | (~B & A) -> (B ^ A)
1881 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
1882 (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
1883 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
1886 // (A & B) | (~A ^ B) -> (~A ^ B)
1887 // (B & A) | (~A ^ B) -> (~A ^ B)
1888 // (A & B) | (B ^ ~A) -> (B ^ ~A)
1889 // (B & A) | (B ^ ~A) -> (B ^ ~A)
1890 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1891 (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
1892 match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
1895 // (~A ^ B) | (A & B) -> (~A ^ B)
1896 // (~A ^ B) | (B & A) -> (~A ^ B)
1897 // (B ^ ~A) | (A & B) -> (B ^ ~A)
1898 // (B ^ ~A) | (B & A) -> (B ^ ~A)
1899 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
1900 (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
1901 match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
1904 if (Value *V = simplifyAndOrOfICmps(Op0, Op1, false))
1907 // Try some generic simplifications for associative operations.
1908 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
1912 // Or distributes over And. Try some generic simplifications based on this.
1913 if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
1917 // If the operation is with the result of a select instruction, check whether
1918 // operating on either branch of the select always yields the same value.
1919 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1920 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
1924 // (A & C1)|(B & C2)
1925 ConstantInt *C1, *C2;
1926 if (match(Op0, m_And(m_Value(A), m_ConstantInt(C1))) &&
1927 match(Op1, m_And(m_Value(B), m_ConstantInt(C2)))) {
1928 if (C1->getValue() == ~C2->getValue()) {
1929 // (A & C1)|(B & C2)
1930 // If we have: ((V + N) & C1) | (V & C2)
1931 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
1932 // replace with V+N.
1934 if (C2->getValue().isMask() && // C2 == 0+1+
1935 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
1936 // Add commutes, try both ways.
1938 MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1941 MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1944 // Or commutes, try both ways.
1945 if (C1->getValue().isMask() &&
1946 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
1947 // Add commutes, try both ways.
1949 MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1952 MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1958 // If the operation is with the result of a phi instruction, check whether
1959 // operating on all incoming values of the phi always yields the same value.
1960 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1961 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
1967 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1968 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit);
1971 /// Given operands for a Xor, see if we can fold the result.
1972 /// If not, this returns null.
1973 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1974 unsigned MaxRecurse) {
1975 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
1978 // A ^ undef -> undef
1979 if (match(Op1, m_Undef()))
1983 if (match(Op1, m_Zero()))
1988 return Constant::getNullValue(Op0->getType());
1990 // A ^ ~A = ~A ^ A = -1
1991 if (match(Op0, m_Not(m_Specific(Op1))) ||
1992 match(Op1, m_Not(m_Specific(Op0))))
1993 return Constant::getAllOnesValue(Op0->getType());
1995 // Try some generic simplifications for associative operations.
1996 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
2000 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2001 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2002 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2003 // only if B and C are equal. If B and C are equal then (since we assume
2004 // that operands have already been simplified) "select(cond, B, C)" should
2005 // have been simplified to the common value of B and C already. Analysing
2006 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2007 // for threading over phi nodes.
2012 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2013 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
2017 static Type *GetCompareTy(Value *Op) {
2018 return CmpInst::makeCmpResultType(Op->getType());
2021 /// Rummage around inside V looking for something equivalent to the comparison
2022 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2023 /// Helper function for analyzing max/min idioms.
2024 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2025 Value *LHS, Value *RHS) {
2026 SelectInst *SI = dyn_cast<SelectInst>(V);
2029 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2032 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2033 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2035 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2036 LHS == CmpRHS && RHS == CmpLHS)
2041 // A significant optimization not implemented here is assuming that alloca
2042 // addresses are not equal to incoming argument values. They don't *alias*,
2043 // as we say, but that doesn't mean they aren't equal, so we take a
2044 // conservative approach.
2046 // This is inspired in part by C++11 5.10p1:
2047 // "Two pointers of the same type compare equal if and only if they are both
2048 // null, both point to the same function, or both represent the same address."
2051 // This is pretty permissive.
2053 // It's also partly due to C11 6.5.9p6:
2054 // "Two pointers compare equal if and only if both are null pointers, both are
2055 // pointers to the same object (including a pointer to an object and a
2056 // subobject at its beginning) or function, both are pointers to one past the
2057 // last element of the same array object, or one is a pointer to one past the
2058 // end of one array object and the other is a pointer to the start of a
2059 // different array object that happens to immediately follow the first array
2060 // object in the address space.)
2062 // C11's version is more restrictive; however, there's no reason why an argument
2063 // couldn't be a one-past-the-end value for a stack object in the caller and be
2064 // equal to the beginning of a stack object in the callee.
2066 // If the C and C++ standards are ever made sufficiently restrictive in this
2067 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2068 // this optimization.
2070 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2071 const DominatorTree *DT, CmpInst::Predicate Pred,
2072 const Instruction *CxtI, Value *LHS, Value *RHS) {
2073 // First, skip past any trivial no-ops.
2074 LHS = LHS->stripPointerCasts();
2075 RHS = RHS->stripPointerCasts();
2077 // A non-null pointer is not equal to a null pointer.
2078 if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
2079 (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
2080 return ConstantInt::get(GetCompareTy(LHS),
2081 !CmpInst::isTrueWhenEqual(Pred));
2083 // We can only fold certain predicates on pointer comparisons.
2088 // Equality comparisons are easy to fold.
2089 case CmpInst::ICMP_EQ:
2090 case CmpInst::ICMP_NE:
2093 // We can only handle unsigned relational comparisons because 'inbounds' on
2094 // a GEP only protects against unsigned wrapping.
2095 case CmpInst::ICMP_UGT:
2096 case CmpInst::ICMP_UGE:
2097 case CmpInst::ICMP_ULT:
2098 case CmpInst::ICMP_ULE:
2099 // However, we have to switch them to their signed variants to handle
2100 // negative indices from the base pointer.
2101 Pred = ICmpInst::getSignedPredicate(Pred);
2105 // Strip off any constant offsets so that we can reason about them.
2106 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2107 // here and compare base addresses like AliasAnalysis does, however there are
2108 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2109 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2110 // doesn't need to guarantee pointer inequality when it says NoAlias.
2111 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2112 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2114 // If LHS and RHS are related via constant offsets to the same base
2115 // value, we can replace it with an icmp which just compares the offsets.
2117 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2119 // Various optimizations for (in)equality comparisons.
2120 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2121 // Different non-empty allocations that exist at the same time have
2122 // different addresses (if the program can tell). Global variables always
2123 // exist, so they always exist during the lifetime of each other and all
2124 // allocas. Two different allocas usually have different addresses...
2126 // However, if there's an @llvm.stackrestore dynamically in between two
2127 // allocas, they may have the same address. It's tempting to reduce the
2128 // scope of the problem by only looking at *static* allocas here. That would
2129 // cover the majority of allocas while significantly reducing the likelihood
2130 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2131 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2132 // an entry block. Also, if we have a block that's not attached to a
2133 // function, we can't tell if it's "static" under the current definition.
2134 // Theoretically, this problem could be fixed by creating a new kind of
2135 // instruction specifically for static allocas. Such a new instruction
2136 // could be required to be at the top of the entry block, thus preventing it
2137 // from being subject to a @llvm.stackrestore. Instcombine could even
2138 // convert regular allocas into these special allocas. It'd be nifty.
2139 // However, until then, this problem remains open.
2141 // So, we'll assume that two non-empty allocas have different addresses
2144 // With all that, if the offsets are within the bounds of their allocations
2145 // (and not one-past-the-end! so we can't use inbounds!), and their
2146 // allocations aren't the same, the pointers are not equal.
2148 // Note that it's not necessary to check for LHS being a global variable
2149 // address, due to canonicalization and constant folding.
2150 if (isa<AllocaInst>(LHS) &&
2151 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2152 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2153 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2154 uint64_t LHSSize, RHSSize;
2155 if (LHSOffsetCI && RHSOffsetCI &&
2156 getObjectSize(LHS, LHSSize, DL, TLI) &&
2157 getObjectSize(RHS, RHSSize, DL, TLI)) {
2158 const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2159 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2160 if (!LHSOffsetValue.isNegative() &&
2161 !RHSOffsetValue.isNegative() &&
2162 LHSOffsetValue.ult(LHSSize) &&
2163 RHSOffsetValue.ult(RHSSize)) {
2164 return ConstantInt::get(GetCompareTy(LHS),
2165 !CmpInst::isTrueWhenEqual(Pred));
2169 // Repeat the above check but this time without depending on DataLayout
2170 // or being able to compute a precise size.
2171 if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2172 !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2173 LHSOffset->isNullValue() &&
2174 RHSOffset->isNullValue())
2175 return ConstantInt::get(GetCompareTy(LHS),
2176 !CmpInst::isTrueWhenEqual(Pred));
2179 // Even if a non-inbounds GEP occurs along the path we can still optimize
2180 // equality comparisons concerning the result. We avoid walking the whole
2181 // chain again by starting where the last calls to
2182 // stripAndComputeConstantOffsets left off and accumulating the offsets.
2183 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2184 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2186 return ConstantExpr::getICmp(Pred,
2187 ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2188 ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2190 // If one side of the equality comparison must come from a noalias call
2191 // (meaning a system memory allocation function), and the other side must
2192 // come from a pointer that cannot overlap with dynamically-allocated
2193 // memory within the lifetime of the current function (allocas, byval
2194 // arguments, globals), then determine the comparison result here.
2195 SmallVector<Value *, 8> LHSUObjs, RHSUObjs;
2196 GetUnderlyingObjects(LHS, LHSUObjs, DL);
2197 GetUnderlyingObjects(RHS, RHSUObjs, DL);
2199 // Is the set of underlying objects all noalias calls?
2200 auto IsNAC = [](ArrayRef<Value *> Objects) {
2201 return all_of(Objects, isNoAliasCall);
2204 // Is the set of underlying objects all things which must be disjoint from
2205 // noalias calls. For allocas, we consider only static ones (dynamic
2206 // allocas might be transformed into calls to malloc not simultaneously
2207 // live with the compared-to allocation). For globals, we exclude symbols
2208 // that might be resolved lazily to symbols in another dynamically-loaded
2209 // library (and, thus, could be malloc'ed by the implementation).
2210 auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) {
2211 return all_of(Objects, [](Value *V) {
2212 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2213 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2214 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2215 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2216 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2217 !GV->isThreadLocal();
2218 if (const Argument *A = dyn_cast<Argument>(V))
2219 return A->hasByValAttr();
2224 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2225 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2226 return ConstantInt::get(GetCompareTy(LHS),
2227 !CmpInst::isTrueWhenEqual(Pred));
2229 // Fold comparisons for non-escaping pointers even if the allocation call
2230 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2231 // dynamic allocation call could be either of the operands.
2232 Value *MI = nullptr;
2233 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
2235 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
2237 // FIXME: We should also fold the compare when the pointer escapes, but the
2238 // compare dominates the pointer escape
2239 if (MI && !PointerMayBeCaptured(MI, true, true))
2240 return ConstantInt::get(GetCompareTy(LHS),
2241 CmpInst::isFalseWhenEqual(Pred));
2248 /// Fold an icmp when its operands have i1 scalar type.
2249 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2250 Value *RHS, const SimplifyQuery &Q) {
2251 Type *ITy = GetCompareTy(LHS); // The return type.
2252 Type *OpTy = LHS->getType(); // The operand type.
2253 if (!OpTy->getScalarType()->isIntegerTy(1))
2256 // A boolean compared to true/false can be simplified in 14 out of the 20
2257 // (10 predicates * 2 constants) possible combinations. Cases not handled here
2258 // require a 'not' of the LHS, so those must be transformed in InstCombine.
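// E.g., 'icmp ne i1 %x, false' folds to '%x' here, while 'icmp eq i1 %x, false' would need 'xor %x, true'.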
2259 if (match(RHS, m_Zero())) {
2261 case CmpInst::ICMP_NE: // X != 0 -> X
2262 case CmpInst::ICMP_UGT: // X >u 0 -> X
2263 case CmpInst::ICMP_SLT: // X <s 0 -> X
2266 case CmpInst::ICMP_ULT: // X <u 0 -> false
2267 case CmpInst::ICMP_SGT: // X >s 0 -> false
2268 return getFalse(ITy);
2270 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2271 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2272 return getTrue(ITy);
2276 } else if (match(RHS, m_One())) {
2278 case CmpInst::ICMP_EQ: // X == 1 -> X
2279 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2280 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2283 case CmpInst::ICMP_UGT: // X >u 1 -> false
2284 case CmpInst::ICMP_SLT: // X <s -1 -> false
2285 return getFalse(ITy);
2287 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2288 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2289 return getTrue(ITy);
2298 case ICmpInst::ICMP_UGE:
2299 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2300 return getTrue(ITy);
2302 case ICmpInst::ICMP_SGE:
2303 /// For a signed comparison, the i1 values false and true are 0 and -1
2304 /// respectively. This maps into a truth table of:
2305 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2306 /// 0 | 0 | 1 (0 >= 0) | 1
2307 /// 0 | 1 | 1 (0 >= -1) | 1
2308 /// 1 | 0 | 0 (-1 >= 0) | 0
2309 /// 1 | 1 | 1 (-1 >= -1) | 1
2310 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2311 return getTrue(ITy);
2313 case ICmpInst::ICMP_ULE:
2314 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2315 return getTrue(ITy);
2322 /// Try hard to fold icmp with zero RHS because this is a common case.
2323 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2324 Value *RHS, const SimplifyQuery &Q) {
2325 if (!match(RHS, m_Zero()))
2328 Type *ITy = GetCompareTy(LHS); // The return type.
2331 llvm_unreachable("Unknown ICmp predicate!");
2332 case ICmpInst::ICMP_ULT:
2333 return getFalse(ITy);
2334 case ICmpInst::ICMP_UGE:
2335 return getTrue(ITy);
2336 case ICmpInst::ICMP_EQ:
2337 case ICmpInst::ICMP_ULE:
2338 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2339 return getFalse(ITy);
2341 case ICmpInst::ICMP_NE:
2342 case ICmpInst::ICMP_UGT:
2343 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2344 return getTrue(ITy);
2346 case ICmpInst::ICMP_SLT: {
2347 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2348 if (LHSKnown.isNegative())
2349 return getTrue(ITy);
2350 if (LHSKnown.isNonNegative())
2351 return getFalse(ITy);
2354 case ICmpInst::ICMP_SLE: {
2355 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2356 if (LHSKnown.isNegative())
2357 return getTrue(ITy);
2358 if (LHSKnown.isNonNegative() &&
2359 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2360 return getFalse(ITy);
2363 case ICmpInst::ICMP_SGE: {
2364 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2365 if (LHSKnown.isNegative())
2366 return getFalse(ITy);
2367 if (LHSKnown.isNonNegative())
2368 return getTrue(ITy);
2371 case ICmpInst::ICMP_SGT: {
2372 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2373 if (LHSKnown.isNegative())
2374 return getFalse(ITy);
2375 if (LHSKnown.isNonNegative() &&
2376 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2377 return getTrue(ITy);
2385 /// Many binary operators with a constant operand have an easy-to-compute
2386 /// range of outputs. This can be used to fold a comparison to always true or always false.
2388 static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper) {
2389 unsigned Width = Lower.getBitWidth();
2391 switch (BO.getOpcode()) {
2392 case Instruction::Add:
2393 if (match(BO.getOperand(1), m_APInt(C)) && *C != 0) {
2394 // FIXME: If we have both nuw and nsw, we should reduce the range further.
2395 if (BO.hasNoUnsignedWrap()) {
2396 // 'add nuw x, C' produces [C, UINT_MAX].
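// E.g., 'add nuw i8 %x, 16' can only produce values in [16, 255].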
2398 } else if (BO.hasNoSignedWrap()) {
2399 if (C->isNegative()) {
2400 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
2401 Lower = APInt::getSignedMinValue(Width);
2402 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
2404 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
2405 Lower = APInt::getSignedMinValue(Width) + *C;
2406 Upper = APInt::getSignedMaxValue(Width) + 1;
2412 case Instruction::And:
2413 if (match(BO.getOperand(1), m_APInt(C)))
2414 // 'and x, C' produces [0, C].
2418 case Instruction::Or:
2419 if (match(BO.getOperand(1), m_APInt(C)))
2420 // 'or x, C' produces [C, UINT_MAX].
2424 case Instruction::AShr:
2425 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
2426 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
2427 Lower = APInt::getSignedMinValue(Width).ashr(*C);
2428 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
2429 } else if (match(BO.getOperand(0), m_APInt(C))) {
2430 unsigned ShiftAmount = Width - 1;
2431 if (*C != 0 && BO.isExact())
2432 ShiftAmount = C->countTrailingZeros();
2433 if (C->isNegative()) {
2434 // 'ashr C, x' produces [C, C >> (Width-1)]
2436 Upper = C->ashr(ShiftAmount) + 1;
2438 // 'ashr C, x' produces [C >> (Width-1), C]
2439 Lower = C->ashr(ShiftAmount);
2445 case Instruction::LShr:
2446 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
2447 // 'lshr x, C' produces [0, UINT_MAX >> C].
2448 Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
2449 } else if (match(BO.getOperand(0), m_APInt(C))) {
2450 // 'lshr C, x' produces [C >> (Width-1), C].
2451 unsigned ShiftAmount = Width - 1;
2452 if (*C != 0 && BO.isExact())
2453 ShiftAmount = C->countTrailingZeros();
2454 Lower = C->lshr(ShiftAmount);
2459 case Instruction::Shl:
2460 if (match(BO.getOperand(0), m_APInt(C))) {
2461 if (BO.hasNoUnsignedWrap()) {
2462 // 'shl nuw C, x' produces [C, C << CLZ(C)]
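// E.g., 'shl nuw i8 12, %x' produces values in [12, 192]; 192 is 12 shifted left by its 4 leading zeros.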
2464 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
2465 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
2466 if (C->isNegative()) {
2467 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
2468 unsigned ShiftAmount = C->countLeadingOnes() - 1;
2469 Lower = C->shl(ShiftAmount);
2472 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
2473 unsigned ShiftAmount = C->countLeadingZeros() - 1;
2475 Upper = C->shl(ShiftAmount) + 1;
2481 case Instruction::SDiv:
2482 if (match(BO.getOperand(1), m_APInt(C))) {
2483 APInt IntMin = APInt::getSignedMinValue(Width);
2484 APInt IntMax = APInt::getSignedMaxValue(Width);
2485 if (C->isAllOnesValue()) {
2486 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
2487 // where C != -1 and C != 0 and C != 1
2490 } else if (C->countLeadingZeros() < Width - 1) {
2491 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
2492 // where C != -1 and C != 0 and C != 1
2493 Lower = IntMin.sdiv(*C);
2494 Upper = IntMax.sdiv(*C);
2495 if (Lower.sgt(Upper))
2496 std::swap(Lower, Upper);
2498 assert(Upper != Lower && "Upper part of range has wrapped!");
2500 } else if (match(BO.getOperand(0), m_APInt(C))) {
2501 if (C->isMinSignedValue()) {
2502 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
2504 Upper = Lower.lshr(1) + 1;
2506 // 'sdiv C, x' produces [-|C|, |C|].
2507 Upper = C->abs() + 1;
2508 Lower = (-Upper) + 1;
2513 case Instruction::UDiv:
2514 if (match(BO.getOperand(1), m_APInt(C)) && *C != 0) {
2515 // 'udiv x, C' produces [0, UINT_MAX / C].
2516 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
2517 } else if (match(BO.getOperand(0), m_APInt(C))) {
2518 // 'udiv C, x' produces [0, C].
2523 case Instruction::SRem:
2524 if (match(BO.getOperand(1), m_APInt(C))) {
2525 // 'srem x, C' produces (-|C|, |C|).
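// E.g., 'srem i32 %x, 10' produces values in [-9, 9].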
2527 Lower = (-Upper) + 1;
2531 case Instruction::URem:
2532 if (match(BO.getOperand(1), m_APInt(C)))
2533 // 'urem x, C' produces [0, C).
2542 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2545 if (!match(RHS, m_APInt(C)))
2548 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
2549 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2550 if (RHS_CR.isEmptySet())
2551 return ConstantInt::getFalse(GetCompareTy(RHS));
2552 if (RHS_CR.isFullSet())
2553 return ConstantInt::getTrue(GetCompareTy(RHS));
2555 // Find the range of possible values for binary operators.
2556 unsigned Width = C->getBitWidth();
2557 APInt Lower = APInt(Width, 0);
2558 APInt Upper = APInt(Width, 0);
2559 if (auto *BO = dyn_cast<BinaryOperator>(LHS))
2560 setLimitsForBinOp(*BO, Lower, Upper);
2562 ConstantRange LHS_CR =
2563 Lower != Upper ? ConstantRange(Lower, Upper) : ConstantRange(Width, true);
2565 if (auto *I = dyn_cast<Instruction>(LHS))
2566 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
2567 LHS_CR = LHS_CR.intersectWith(getConstantRangeFromMetadata(*Ranges));
2569 if (!LHS_CR.isFullSet()) {
2570 if (RHS_CR.contains(LHS_CR))
2571 return ConstantInt::getTrue(GetCompareTy(RHS));
2572 if (RHS_CR.inverse().contains(LHS_CR))
2573 return ConstantInt::getFalse(GetCompareTy(RHS));
2579 /// TODO: A large part of this logic is duplicated in InstCombine's
2580 /// foldICmpBinOp(). We should be able to share that and avoid the code duplication.
2582 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2583 Value *RHS, const SimplifyQuery &Q,
2584 unsigned MaxRecurse) {
2585 Type *ITy = GetCompareTy(LHS); // The return type.
2587 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2588 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2589 if (MaxRecurse && (LBO || RBO)) {
2590 // Analyze the case when either LHS or RHS is an add instruction.
2591 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2592 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
2593 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
2594 if (LBO && LBO->getOpcode() == Instruction::Add) {
2595 A = LBO->getOperand(0);
2596 B = LBO->getOperand(1);
2598 ICmpInst::isEquality(Pred) ||
2599 (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) ||
2600 (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap());
2602 if (RBO && RBO->getOpcode() == Instruction::Add) {
2603 C = RBO->getOperand(0);
2604 D = RBO->getOperand(1);
2606 ICmpInst::isEquality(Pred) ||
2607 (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) ||
2608 (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap());
2611 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
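// E.g., 'icmp eq (add i32 %x, %y), %x' is analyzed as 'icmp eq i32 %y, 0'.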
2612 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
2613 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
2614 Constant::getNullValue(RHS->getType()), Q,
2618 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2619 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
2621 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
2622 C == LHS ? D : C, Q, MaxRecurse - 1))
2625 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
2626 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem &&
2628 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2631 // C + B == C + D -> B == D
2634 } else if (A == D) {
2635 // D + B == C + D -> B == C
2638 } else if (B == C) {
2639 // A + C == C + D -> A == D
2644 // A + D == C + D -> A == C
2648 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
2655 // icmp pred (or X, Y), X
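// (X | Y) is always >=u X, so 'ult' folds to false and 'uge' folds to true.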
2656 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2657 if (Pred == ICmpInst::ICMP_ULT)
2658 return getFalse(ITy);
2659 if (Pred == ICmpInst::ICMP_UGE)
2660 return getTrue(ITy);
2662 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2663 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2664 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2665 if (RHSKnown.isNonNegative() && YKnown.isNegative())
2666 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2667 if (RHSKnown.isNegative() || YKnown.isNonNegative())
2668 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2671 // icmp pred X, (or X, Y)
2672 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) {
2673 if (Pred == ICmpInst::ICMP_ULE)
2674 return getTrue(ITy);
2675 if (Pred == ICmpInst::ICMP_UGT)
2676 return getFalse(ITy);
2678 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) {
2679 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2680 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2681 if (LHSKnown.isNonNegative() && YKnown.isNegative())
2682 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy);
2683 if (LHSKnown.isNegative() || YKnown.isNonNegative())
2684 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy);
2689 // icmp pred (and X, Y), X
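// (X & Y) is always <=u X, so 'ugt' folds to false and 'ule' folds to true.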
2690 if (LBO && match(LBO, m_CombineOr(m_And(m_Value(), m_Specific(RHS)),
2691 m_And(m_Specific(RHS), m_Value())))) {
2692 if (Pred == ICmpInst::ICMP_UGT)
2693 return getFalse(ITy);
2694 if (Pred == ICmpInst::ICMP_ULE)
2695 return getTrue(ITy);
2697 // icmp pred X, (and X, Y)
2698 if (RBO && match(RBO, m_CombineOr(m_And(m_Value(), m_Specific(LHS)),
2699 m_And(m_Specific(LHS), m_Value())))) {
2700 if (Pred == ICmpInst::ICMP_UGE)
2701 return getTrue(ITy);
2702 if (Pred == ICmpInst::ICMP_ULT)
2703 return getFalse(ITy);
2706 // 0 - (zext X) pred C
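// zext(X) is non-negative, so 0 - zext(X) is never strictly positive; comparing it with a positive constant is then decided below.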
2707 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
2708 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2709 if (RHSC->getValue().isStrictlyPositive()) {
2710 if (Pred == ICmpInst::ICMP_SLT)
2711 return ConstantInt::getTrue(RHSC->getContext());
2712 if (Pred == ICmpInst::ICMP_SGE)
2713 return ConstantInt::getFalse(RHSC->getContext());
2714 if (Pred == ICmpInst::ICMP_EQ)
2715 return ConstantInt::getFalse(RHSC->getContext());
2716 if (Pred == ICmpInst::ICMP_NE)
2717 return ConstantInt::getTrue(RHSC->getContext());
2719 if (RHSC->getValue().isNonNegative()) {
2720 if (Pred == ICmpInst::ICMP_SLE)
2721 return ConstantInt::getTrue(RHSC->getContext());
2722 if (Pred == ICmpInst::ICMP_SGT)
2723 return ConstantInt::getFalse(RHSC->getContext());
2728 // icmp pred (urem X, Y), Y
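// A defined 'urem X, Y' result is always <u Y, which decides the equality and unsigned cases below.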
2729 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2733 case ICmpInst::ICMP_SGT:
2734 case ICmpInst::ICMP_SGE: {
2735 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2736 if (!Known.isNonNegative())
2740 case ICmpInst::ICMP_EQ:
2741 case ICmpInst::ICMP_UGT:
2742 case ICmpInst::ICMP_UGE:
2743 return getFalse(ITy);
2744 case ICmpInst::ICMP_SLT:
2745 case ICmpInst::ICMP_SLE: {
2746 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2747 if (!Known.isNonNegative())
2751 case ICmpInst::ICMP_NE:
2752 case ICmpInst::ICMP_ULT:
2753 case ICmpInst::ICMP_ULE:
2754 return getTrue(ITy);
2758 // icmp pred X, (urem Y, X)
2759 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) {
2763 case ICmpInst::ICMP_SGT:
2764 case ICmpInst::ICMP_SGE: {
2765 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2766 if (!Known.isNonNegative())
2770 case ICmpInst::ICMP_NE:
2771 case ICmpInst::ICMP_UGT:
2772 case ICmpInst::ICMP_UGE:
2773 return getTrue(ITy);
2774 case ICmpInst::ICMP_SLT:
2775 case ICmpInst::ICMP_SLE: {
2776 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2777 if (!Known.isNonNegative())
2781 case ICmpInst::ICMP_EQ:
2782 case ICmpInst::ICMP_ULT:
2783 case ICmpInst::ICMP_ULE:
2784 return getFalse(ITy);
2790 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2791 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) {
2792 // icmp pred (X op Y), X
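// Shifting right or dividing cannot increase the value, so 'ugt' is false and 'ule' is true.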
2793 if (Pred == ICmpInst::ICMP_UGT)
2794 return getFalse(ITy);
2795 if (Pred == ICmpInst::ICMP_ULE)
2796 return getTrue(ITy);
2801 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) ||
2802 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) {
2803 // icmp pred X, (X op Y)
2804 if (Pred == ICmpInst::ICMP_ULT)
2805 return getFalse(ITy);
2806 if (Pred == ICmpInst::ICMP_UGE)
2807 return getTrue(ITy);
2814 // Handle (icmp (shl CI2, X), CI) where CI2 is a power of 2 and CI isn't.
2815 if (auto *CI = dyn_cast<ConstantInt>(RHS)) {
2816 const APInt *CI2Val, *CIVal = &CI->getValue();
2817 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) &&
2818 CI2Val->isPowerOf2()) {
2819 if (!CIVal->isPowerOf2()) {
2820 // CI2 << X can equal zero in some circumstances,
2821 // this simplification is unsafe if CI is zero.
2823 // We know it is safe if:
2824 // - The shift is nsw, we can't shift out the one bit.
2825 // - The shift is nuw, we can't shift out the one bit.
2828 if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() ||
2829 *CI2Val == 1 || !CI->isZero()) {
2830 if (Pred == ICmpInst::ICMP_EQ)
2831 return ConstantInt::getFalse(RHS->getContext());
2832 if (Pred == ICmpInst::ICMP_NE)
2833 return ConstantInt::getTrue(RHS->getContext());
2836 if (CIVal->isSignMask() && *CI2Val == 1) {
2837 if (Pred == ICmpInst::ICMP_UGT)
2838 return ConstantInt::getFalse(RHS->getContext());
2839 if (Pred == ICmpInst::ICMP_ULE)
2840 return ConstantInt::getTrue(RHS->getContext());
2845 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
2846 LBO->getOperand(1) == RBO->getOperand(1)) {
2847 switch (LBO->getOpcode()) {
2850 case Instruction::UDiv:
2851 case Instruction::LShr:
2852 if (ICmpInst::isSigned(Pred) || !LBO->isExact() || !RBO->isExact())
2854 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2855 RBO->getOperand(0), Q, MaxRecurse - 1))
2858 case Instruction::SDiv:
2859 if (!ICmpInst::isEquality(Pred) || !LBO->isExact() || !RBO->isExact())
2861 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2862 RBO->getOperand(0), Q, MaxRecurse - 1))
2865 case Instruction::AShr:
2866 if (!LBO->isExact() || !RBO->isExact())
2868 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2869 RBO->getOperand(0), Q, MaxRecurse - 1))
2872 case Instruction::Shl: {
2873 bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap();
2874 bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap();
2877 if (!NSW && ICmpInst::isSigned(Pred))
2879 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2880 RBO->getOperand(0), Q, MaxRecurse - 1))
2889 /// Simplify integer comparisons where at least one operand of the compare
2890 /// matches an integer min/max idiom.
2891 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
2892 Value *RHS, const SimplifyQuery &Q,
2893 unsigned MaxRecurse) {
2894 Type *ITy = GetCompareTy(LHS); // The return type.
2896 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
2897 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
2899 // Signed variants on "max(a,b)>=a -> true".
2900 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
2902 std::swap(A, B); // smax(A, B) pred A.
2903 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2904 // We analyze this as smax(A, B) pred A.
2906 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
2907 (A == LHS || B == LHS)) {
2909 std::swap(A, B); // A pred smax(A, B).
2910 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2911 // We analyze this as smax(A, B) swapped-pred A.
2912 P = CmpInst::getSwappedPredicate(Pred);
2913 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
2914 (A == RHS || B == RHS)) {
2916 std::swap(A, B); // smin(A, B) pred A.
2917 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2918 // We analyze this as smax(-A, -B) swapped-pred -A.
2919 // Note that we do not need to actually form -A or -B thanks to EqP.
2920 P = CmpInst::getSwappedPredicate(Pred);
2921 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
2922 (A == LHS || B == LHS)) {
2924 std::swap(A, B); // A pred smin(A, B).
2925 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2926 // We analyze this as smax(-A, -B) pred -A.
2927 // Note that we do not need to actually form -A or -B thanks to EqP.
2930 if (P != CmpInst::BAD_ICMP_PREDICATE) {
2931 // Cases correspond to "max(A, B) p A".
2935 case CmpInst::ICMP_EQ:
2936 case CmpInst::ICMP_SLE:
2937 // Equivalent to "A EqP B". This may be the same as the condition tested
2938 // in the max/min; if so, we can just return that.
2939 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
2941 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
2943 // Otherwise, see if "A EqP B" simplifies.
2945 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
2948 case CmpInst::ICMP_NE:
2949 case CmpInst::ICMP_SGT: {
2950 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
2951 // Equivalent to "A InvEqP B". This may be the same as the condition
2952 // tested in the max/min; if so, we can just return that.
2953 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
2955 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
2957 // Otherwise, see if "A InvEqP B" simplifies.
2959 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
2963 case CmpInst::ICMP_SGE:
2965 return getTrue(ITy);
2966 case CmpInst::ICMP_SLT:
2968 return getFalse(ITy);
2972 // Unsigned variants on "max(a,b)>=a -> true".
2973 P = CmpInst::BAD_ICMP_PREDICATE;
2974 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
2976 std::swap(A, B); // umax(A, B) pred A.
2977 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
2978 // We analyze this as umax(A, B) pred A.
2980 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
2981 (A == LHS || B == LHS)) {
2983 std::swap(A, B); // A pred umax(A, B).
2984 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
2985 // We analyze this as umax(A, B) swapped-pred A.
2986 P = CmpInst::getSwappedPredicate(Pred);
2987 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
2988 (A == RHS || B == RHS)) {
2990 std::swap(A, B); // umin(A, B) pred A.
2991 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
2992 // We analyze this as umax(-A, -B) swapped-pred -A.
2993 // Note that we do not need to actually form -A or -B thanks to EqP.
2994 P = CmpInst::getSwappedPredicate(Pred);
2995 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
2996 (A == LHS || B == LHS)) {
2998 std::swap(A, B); // A pred umin(A, B).
2999 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3000 // We analyze this as umax(-A, -B) pred -A.
3001 // Note that we do not need to actually form -A or -B thanks to EqP.
3004 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3005 // Cases correspond to "max(A, B) p A".
3009 case CmpInst::ICMP_EQ:
3010 case CmpInst::ICMP_ULE:
3011 // Equivalent to "A EqP B". This may be the same as the condition tested
3012 // in the max/min; if so, we can just return that.
3013 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3015 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3017 // Otherwise, see if "A EqP B" simplifies.
3019 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3022 case CmpInst::ICMP_NE:
3023 case CmpInst::ICMP_UGT: {
3024 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3025 // Equivalent to "A InvEqP B". This may be the same as the condition
3026 // tested in the max/min; if so, we can just return that.
3027 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3029 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3031 // Otherwise, see if "A InvEqP B" simplifies.
3033 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3037 case CmpInst::ICMP_UGE:
3039 return getTrue(ITy);
3040 case CmpInst::ICMP_ULT:
3042 return getFalse(ITy);
3046 // Variants on "max(x,y) >= min(x,z)".
3048 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3049 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3050 (A == C || A == D || B == C || B == D)) {
3051 // max(x, ?) pred min(x, ?).
3052 if (Pred == CmpInst::ICMP_SGE)
3054 return getTrue(ITy);
3055 if (Pred == CmpInst::ICMP_SLT)
3057 return getFalse(ITy);
3058 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3059 match(RHS, m_SMax(m_Value(C), m_Value(D))) &&
3060 (A == C || A == D || B == C || B == D)) {
3061 // min(x, ?) pred max(x, ?).
3062 if (Pred == CmpInst::ICMP_SLE)
3064 return getTrue(ITy);
3065 if (Pred == CmpInst::ICMP_SGT)
3067 return getFalse(ITy);
3068 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3069 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3070 (A == C || A == D || B == C || B == D)) {
3071 // max(x, ?) pred min(x, ?).
3072 if (Pred == CmpInst::ICMP_UGE)
3074 return getTrue(ITy);
3075 if (Pred == CmpInst::ICMP_ULT)
3077 return getFalse(ITy);
3078 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3079 match(RHS, m_UMax(m_Value(C), m_Value(D))) &&
3080 (A == C || A == D || B == C || B == D)) {
3081 // min(x, ?) pred max(x, ?).
3082 if (Pred == CmpInst::ICMP_ULE)
3084 return getTrue(ITy);
3085 if (Pred == CmpInst::ICMP_UGT)
3087 return getFalse(ITy);
3093 /// Given operands for an ICmpInst, see if we can fold the result.
3094 /// If not, this returns null.
3095 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3096 const SimplifyQuery &Q, unsigned MaxRecurse) {
3097 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3098 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3100 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3101 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3102 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3104 // If we have a constant, make sure it is on the RHS.
3105 std::swap(LHS, RHS);
3106 Pred = CmpInst::getSwappedPredicate(Pred);
3109 Type *ITy = GetCompareTy(LHS); // The return type.
3111 // icmp X, X -> true/false
3112 // X icmp undef -> true/false. For example, icmp ugt %X, undef -> false
3113 // because X could be 0.
3114 if (LHS == RHS || isa<UndefValue>(RHS))
3115 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3117 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3120 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3123 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS))
3126 // If both operands have range metadata, use the metadata
3127 // to simplify the comparison.
3128 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3129 auto RHS_Instr = cast<Instruction>(RHS);
3130 auto LHS_Instr = cast<Instruction>(LHS);
3132 if (RHS_Instr->getMetadata(LLVMContext::MD_range) &&
3133 LHS_Instr->getMetadata(LLVMContext::MD_range)) {
3134 auto RHS_CR = getConstantRangeFromMetadata(
3135 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3136 auto LHS_CR = getConstantRangeFromMetadata(
3137 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3139 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR);
3140 if (Satisfied_CR.contains(LHS_CR))
3141 return ConstantInt::getTrue(RHS->getContext());
3143 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion(
3144 CmpInst::getInversePredicate(Pred), RHS_CR);
3145 if (InversedSatisfied_CR.contains(LHS_CR))
3146 return ConstantInt::getFalse(RHS->getContext());
3150 // Compare of cast, for example (zext X) != 0 -> X != 0
3151 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3152 Instruction *LI = cast<CastInst>(LHS);
3153 Value *SrcOp = LI->getOperand(0);
3154 Type *SrcTy = SrcOp->getType();
3155 Type *DstTy = LI->getType();
3157 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3158 // if the integer type is the same size as the pointer type.
3159 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3160 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3161 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3162 // Transfer the cast to the constant.
3163 if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3164 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3167 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3168 if (RI->getOperand(0)->getType() == SrcTy)
3169 // Compare without the cast.
3170 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3176 if (isa<ZExtInst>(LHS)) {
3177 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the same type.
3179 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3180 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3181 // Compare X and Y. Note that signed predicates become unsigned.
3182 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3183 SrcOp, RI->getOperand(0), Q,
3187 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3188 // too. If not, then try to deduce the result of the comparison.
3189 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3190 // Compute the constant that would happen if we truncated to SrcTy then
3191 // reextended to DstTy.
3192 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3193 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3195 // If the re-extended constant didn't change then this is effectively
3196 // also a case of comparing two zero-extended values.
3197 if (RExt == CI && MaxRecurse)
3198 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3199 SrcOp, Trunc, Q, MaxRecurse-1))
3202 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3203 // there. Use this to work out the result of the comparison.
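// E.g., 'icmp ult (zext i8 %x to i32), 300' is always true because the zext is at most 255.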
3206 default: llvm_unreachable("Unknown ICmp predicate!");
3208 case ICmpInst::ICMP_EQ:
3209 case ICmpInst::ICMP_UGT:
3210 case ICmpInst::ICMP_UGE:
3211 return ConstantInt::getFalse(CI->getContext());
3213 case ICmpInst::ICMP_NE:
3214 case ICmpInst::ICMP_ULT:
3215 case ICmpInst::ICMP_ULE:
3216 return ConstantInt::getTrue(CI->getContext());
3218 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3219 // is non-negative then LHS <s RHS.
3220 case ICmpInst::ICMP_SGT:
3221 case ICmpInst::ICMP_SGE:
3222 return CI->getValue().isNegative() ?
3223 ConstantInt::getTrue(CI->getContext()) :
3224 ConstantInt::getFalse(CI->getContext());
3226 case ICmpInst::ICMP_SLT:
3227 case ICmpInst::ICMP_SLE:
3228 return CI->getValue().isNegative() ?
3229 ConstantInt::getFalse(CI->getContext()) :
3230 ConstantInt::getTrue(CI->getContext());
3236 if (isa<SExtInst>(LHS)) {
3237 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the same type.
3239 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3240 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3241 // Compare X and Y. Note that the predicate does not change.
3242 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3246 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3247 // too. If not, then try to deduce the result of the comparison.
3248 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3249 // Compute the constant that would happen if we truncated to SrcTy then
3250 // reextended to DstTy.
3251 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3252 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3254 // If the re-extended constant didn't change then this is effectively
3255 // also a case of comparing two sign-extended values.
3256 if (RExt == CI && MaxRecurse)
3257 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3260 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3261 // bits there. Use this to work out the result of the comparison.
3264 default: llvm_unreachable("Unknown ICmp predicate!");
3265 case ICmpInst::ICMP_EQ:
3266 return ConstantInt::getFalse(CI->getContext());
3267 case ICmpInst::ICMP_NE:
3268 return ConstantInt::getTrue(CI->getContext());
3270 // If RHS is non-negative then LHS <s RHS. If RHS is negative then LHS >s RHS.
3272 case ICmpInst::ICMP_SGT:
3273 case ICmpInst::ICMP_SGE:
3274 return CI->getValue().isNegative() ?
3275 ConstantInt::getTrue(CI->getContext()) :
3276 ConstantInt::getFalse(CI->getContext());
3277 case ICmpInst::ICMP_SLT:
3278 case ICmpInst::ICMP_SLE:
3279 return CI->getValue().isNegative() ?
3280 ConstantInt::getFalse(CI->getContext()) :
3281 ConstantInt::getTrue(CI->getContext());
3283 // If LHS is non-negative then LHS <u RHS. If LHS is negative then LHS >u RHS.
3285 case ICmpInst::ICMP_UGT:
3286 case ICmpInst::ICMP_UGE:
3287 // Comparison is true iff the LHS <s 0.
3289 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3290 Constant::getNullValue(SrcTy),
3294 case ICmpInst::ICMP_ULT:
3295 case ICmpInst::ICMP_ULE:
3296 // Comparison is true iff the LHS >=s 0.
3298 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3299 Constant::getNullValue(SrcTy),
3309 // icmp eq|ne X, Y -> false|true if X != Y
3310 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
3311 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) {
3312 LLVMContext &Ctx = LHS->getType()->getContext();
3313 return Pred == ICmpInst::ICMP_NE ?
3314 ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
3317 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3320 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3323 // Simplify comparisons of related pointers using a powerful, recursive
3324 // GEP-walk when we have target data available.
3325 if (LHS->getType()->isPointerTy())
3326 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI, LHS, RHS))
3328 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3329 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3330 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3331 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3332 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3333 Q.DL.getTypeSizeInBits(CRHS->getType()))
3334 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI,
3335 CLHS->getPointerOperand(),
3336 CRHS->getPointerOperand()))
3339 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3340 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3341 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3342 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3343 (ICmpInst::isEquality(Pred) ||
3344 (GLHS->isInBounds() && GRHS->isInBounds() &&
3345 Pred == ICmpInst::getSignedPredicate(Pred)))) {
3346 // The bases are equal and the indices are constant. Build a constant
3347 // expression GEP with the same indices and a null base pointer to see
3348 // what constant folding can make out of it.
3349 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
3350 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
3351 Constant *NewLHS = ConstantExpr::getGetElementPtr(
3352 GLHS->getSourceElementType(), Null, IndicesLHS);
3354 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
3355 Constant *NewRHS = ConstantExpr::getGetElementPtr(
3356 GLHS->getSourceElementType(), Null, IndicesRHS);
3357 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
3362 // If a bit is known to be zero for A and known to be one for B,
3363 // then A and B cannot be equal.
3364 if (ICmpInst::isEquality(Pred)) {
3365 const APInt *RHSVal;
3366 if (match(RHS, m_APInt(RHSVal))) {
3367 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
3368 if (LHSKnown.Zero.intersects(*RHSVal) ||
3369 !LHSKnown.One.isSubsetOf(*RHSVal))
3370 return Pred == ICmpInst::ICMP_EQ ? ConstantInt::getFalse(ITy)
3371 : ConstantInt::getTrue(ITy);
3375 // If the comparison is with the result of a select instruction, check whether
3376 // comparing with either branch of the select always yields the same value.
3377 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3378 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3381 // If the comparison is with the result of a phi instruction, check whether
3382 // doing the compare with each incoming phi value yields a common result.
3383 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3384 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3390 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3391 const SimplifyQuery &Q) {
3392 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
3395 /// Given operands for an FCmpInst, see if we can fold the result.
3396 /// If not, this returns null.
3397 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3398 FastMathFlags FMF, const SimplifyQuery &Q,
3399 unsigned MaxRecurse) {
3400 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3401 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3403 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3404 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3405 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3407 // If we have a constant, make sure it is on the RHS.
3408 std::swap(LHS, RHS);
3409 Pred = CmpInst::getSwappedPredicate(Pred);
3412 // Fold trivial predicates.
3413 Type *RetTy = GetCompareTy(LHS);
3414 if (Pred == FCmpInst::FCMP_FALSE)
3415 return getFalse(RetTy);
3416 if (Pred == FCmpInst::FCMP_TRUE)
3417 return getTrue(RetTy);
3419 // UNO/ORD predicates can be trivially folded if NaNs are ignored.
3421 if (Pred == FCmpInst::FCMP_UNO)
3422 return getFalse(RetTy);
3423 if (Pred == FCmpInst::FCMP_ORD)
3424 return getTrue(RetTy);
3427 // fcmp pred x, undef and fcmp pred undef, x
3428 // fold to true if unordered, false if ordered
3429 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
3430 // Choosing NaN for the undef will always make unordered comparison succeed
3431 // and ordered comparison fail.
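// E.g., 'fcmp ult double %x, undef' folds to true and 'fcmp olt double %x, undef' folds to false.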
3432 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3435 // fcmp x,x -> true/false. Not all compares are foldable.
3437 if (CmpInst::isTrueWhenEqual(Pred))
3438 return getTrue(RetTy);
3439 if (CmpInst::isFalseWhenEqual(Pred))
3440 return getFalse(RetTy);
3443 // Handle fcmp with constant RHS
3444 const ConstantFP *CFP = nullptr;
3445 if (const auto *RHSC = dyn_cast<Constant>(RHS)) {
3446 if (RHS->getType()->isVectorTy())
3447 CFP = dyn_cast_or_null<ConstantFP>(RHSC->getSplatValue());
3449 CFP = dyn_cast<ConstantFP>(RHSC);
3451 if (CFP) {
3452 // If the constant is a nan, see if we can fold the comparison based on it.
3453 if (CFP->getValueAPF().isNaN()) {
3454 if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo"
3455 return getFalse(RetTy);
3456 assert(FCmpInst::isUnordered(Pred) &&
3457 "Comparison must be either ordered or unordered!");
3458 // True if unordered.
3459 return getTrue(RetTy);
3460 }
3461 // Check whether the constant is an infinity.
3462 if (CFP->getValueAPF().isInfinity()) {
3463 if (CFP->getValueAPF().isNegative()) {
3464 switch (Pred) {
3465 case FCmpInst::FCMP_OLT:
3466 // No value is ordered and less than negative infinity.
3467 return getFalse(RetTy);
3468 case FCmpInst::FCMP_UGE:
3469 // All values are unordered with or at least negative infinity.
3470 return getTrue(RetTy);
3471 default:
3472 break;
3473 }
3474 } else {
3475 switch (Pred) {
3476 case FCmpInst::FCMP_OGT:
3477 // No value is ordered and greater than infinity.
3478 return getFalse(RetTy);
3479 case FCmpInst::FCMP_ULE:
3480 // All values are unordered with and at most infinity.
3481 return getTrue(RetTy);
3482 default:
3483 break;
3484 }
3485 }
3486 }
3487 if (CFP->getValueAPF().isZero()) {
3488 switch (Pred) {
3489 case FCmpInst::FCMP_UGE:
3490 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3491 return getTrue(RetTy);
3492 break;
3493 case FCmpInst::FCMP_OLT:
3495 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3496 return getFalse(RetTy);
3497 break;
3498 default:
3499 break;
3500 }
3501 }
3502 }
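// Illustrative example (assumed IR): if %a = call double @llvm.fabs.f64(double %x),
// then "fcmp uge double %a, 0.0" folds to true and "fcmp olt double %a, 0.0"
// folds to false, because fabs cannot be ordered less than zero.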
3504 // If the comparison is with the result of a select instruction, check whether
3505 // comparing with either branch of the select always yields the same value.
3506 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3507 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3510 // If the comparison is with the result of a phi instruction, check whether
3511 // doing the compare with each incoming phi value yields a common result.
3512 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3513 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3519 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3520 FastMathFlags FMF, const SimplifyQuery &Q) {
3521 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
3524 /// See if V simplifies when its operand Op is replaced with RepOp.
3525 static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3526 const SimplifyQuery &Q,
3527 unsigned MaxRecurse) {
3528 // Trivial replacement.
3529 if (V == Op)
3530 return RepOp;
3532 // We cannot replace a constant, and shouldn't even try.
3533 if (isa<Constant>(Op))
3534 return nullptr;
3536 auto *I = dyn_cast<Instruction>(V);
3537 if (!I)
3538 return nullptr;
3540 // If this is a binary operator, try to simplify it with the replaced op.
3541 if (auto *B = dyn_cast<BinaryOperator>(I)) {
3543 // %cmp = icmp eq i32 %x, 2147483647
3544 // %add = add nsw i32 %x, 1
3545 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
3547 // We can't replace %sel with %add unless we strip away the flags.
3548 if (isa<OverflowingBinaryOperator>(B))
3549 if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
3550 return nullptr;
3551 if (isa<PossiblyExactOperator>(B))
3552 if (B->isExact())
3553 return nullptr;
3555 if (MaxRecurse) {
3556 if (B->getOperand(0) == Op)
3557 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q,
3558 MaxRecurse - 1);
3559 if (B->getOperand(1) == Op)
3560 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q,
3561 MaxRecurse - 1);
3562 }
3563 }
3565 // Same for CmpInsts.
3566 if (CmpInst *C = dyn_cast<CmpInst>(I)) {
3567 if (MaxRecurse) {
3568 if (C->getOperand(0) == Op)
3569 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
3570 MaxRecurse - 1);
3571 if (C->getOperand(1) == Op)
3572 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q,
3573 MaxRecurse - 1);
3574 }
3575 }
3577 // TODO: We could hand off more cases to instsimplify here.
3579 // If all operands are constant after substituting Op for RepOp then we can
3580 // constant fold the instruction.
3581 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
3582 // Build a list of all constant operands.
3583 SmallVector<Constant *, 8> ConstOps;
3584 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3585 if (I->getOperand(i) == Op)
3586 ConstOps.push_back(CRepOp);
3587 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
3588 ConstOps.push_back(COp);
3593 // All operands were constants, fold it.
3594 if (ConstOps.size() == I->getNumOperands()) {
3595 if (CmpInst *C = dyn_cast<CmpInst>(I))
3596 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
3597 ConstOps[1], Q.DL, Q.TLI);
3599 if (LoadInst *LI = dyn_cast<LoadInst>(I))
3600 if (!LI->isVolatile())
3601 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
3603 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
3610 /// Try to simplify a select instruction when its condition operand is an
3611 /// integer comparison where one operand of the compare is a constant.
3612 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
3613 const APInt *Y, bool TrueWhenUnset) {
3614 const APInt *C;
3616 // (X & Y) == 0 ? X & ~Y : X --> X
3617 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
3618 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
3619 *Y == ~*C)
3620 return TrueWhenUnset ? FalseVal : TrueVal;
3622 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
3623 // (X & Y) != 0 ? X : X & ~Y --> X
3624 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
3625 *Y == ~*C)
3626 return TrueWhenUnset ? FalseVal : TrueVal;
3628 if (Y->isPowerOf2()) {
3629 // (X & Y) == 0 ? X | Y : X --> X | Y
3630 // (X & Y) != 0 ? X | Y : X --> X
3631 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
3632 *Y == *C)
3633 return TrueWhenUnset ? TrueVal : FalseVal;
3635 // (X & Y) == 0 ? X : X | Y --> X
3636 // (X & Y) != 0 ? X : X | Y --> X | Y
3637 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
3638 *Y == *C)
3639 return TrueWhenUnset ? TrueVal : FalseVal;
3640 }
3642 return nullptr;
3643 }
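// Illustrative example (assumed IR) of the power-of-2 bit-test fold above:
//   %a = and i32 %x, 8
//   %c = icmp eq i32 %a, 0
//   %o = or i32 %x, 8
//   %s = select i1 %c, i32 %o, i32 %x   ; simplifies to %o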
3645 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
3646 /// eq/ne.
3647 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *TrueVal,
3648 Value *FalseVal,
3649 bool TrueWhenUnset) {
3650 unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits();
3651 if (!BitWidth)
3652 return nullptr;
3654 APInt MinSignedValue;
3655 Value *X;
3656 if (match(CmpLHS, m_Trunc(m_Value(X))) && (X == TrueVal || X == FalseVal)) {
3657 // icmp slt (trunc X), 0 <--> icmp ne (and X, C), 0
3658 // icmp sgt (trunc X), -1 <--> icmp eq (and X, C), 0
3659 unsigned DestSize = CmpLHS->getType()->getScalarSizeInBits();
3660 MinSignedValue = APInt::getSignedMinValue(DestSize).zext(BitWidth);
3661 } else {
3662 // icmp slt X, 0 <--> icmp ne (and X, C), 0
3663 // icmp sgt X, -1 <--> icmp eq (and X, C), 0
3664 X = CmpLHS;
3665 MinSignedValue = APInt::getSignedMinValue(BitWidth);
3666 }
3668 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, &MinSignedValue,
3675 /// Try to simplify a select instruction when its condition operand is an
3676 /// integer comparison.
3677 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
3678 Value *FalseVal, const SimplifyQuery &Q,
3679 unsigned MaxRecurse) {
3680 ICmpInst::Predicate Pred;
3681 Value *CmpLHS, *CmpRHS;
3682 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
3685 // FIXME: This code is nearly duplicated in InstCombine. Using/refactoring
3686 // decomposeBitTestICmp() might help.
3687 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) {
3688 Value *X;
3689 const APInt *Y;
3690 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
3691 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
3692 Pred == ICmpInst::ICMP_EQ))
3693 return V;
3694 } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) {
3695 // Comparing signed-less-than 0 checks if the sign bit is set.
3696 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal,
3697 false))
3698 return V;
3699 } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) {
3700 // Comparing signed-greater-than -1 checks if the sign bit is not set.
3701 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal,
3702 true))
3703 return V;
3704 }
3706 if (CondVal->hasOneUse()) {
3707 const APInt *C;
3708 if (match(CmpRHS, m_APInt(C))) {
3709 // X < MIN ? T : F --> F
3710 if (Pred == ICmpInst::ICMP_SLT && C->isMinSignedValue())
3711 return FalseVal;
3712 // X < MIN ? T : F --> F
3713 if (Pred == ICmpInst::ICMP_ULT && C->isMinValue())
3714 return FalseVal;
3715 // X > MAX ? T : F --> F
3716 if (Pred == ICmpInst::ICMP_SGT && C->isMaxSignedValue())
3717 return FalseVal;
3718 // X > MAX ? T : F --> F
3719 if (Pred == ICmpInst::ICMP_UGT && C->isMaxValue())
3720 return FalseVal;
3721 }
3722 }
3724 // If we have an equality comparison, then we know the value in one of the
3725 // arms of the select. See if substituting this value into the arm and
3726 // simplifying the result yields the same value as the other arm.
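// Illustrative example (assumed IR) of the substitution fold below:
//   %c = icmp eq i32 %x, 0
//   %s = select i1 %c, i32 0, i32 %x    ; simplifies to %x
// Replacing %x with 0 in the false arm yields 0, the same as the true arm,
// so the false arm can be used unconditionally.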
3727 if (Pred == ICmpInst::ICMP_EQ) {
3728 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == TrueVal ||
3730 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == TrueVal)
3732 return FalseVal;
3733 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == FalseVal ||
3735 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == FalseVal)
3737 return FalseVal;
3738 } else if (Pred == ICmpInst::ICMP_NE) {
3739 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == FalseVal ||
3741 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == FalseVal)
3743 return TrueVal;
3744 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == TrueVal ||
3746 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == TrueVal)
3748 return TrueVal;
3749 }
3751 return nullptr;
3752 }
3754 /// Given operands for a SelectInst, see if we can fold the result.
3755 /// If not, this returns null.
3756 static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
3757 Value *FalseVal, const SimplifyQuery &Q,
3758 unsigned MaxRecurse) {
3759 // select true, X, Y -> X
3760 // select false, X, Y -> Y
3761 if (Constant *CB = dyn_cast<Constant>(CondVal)) {
3762 if (CB->isAllOnesValue())
3764 if (CB->isNullValue())
3768 // select C, X, X -> X
3769 if (TrueVal == FalseVal)
3772 if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
3773 if (isa<Constant>(FalseVal))
3774 return FalseVal;
3775 return TrueVal;
3776 }
3777 if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
3778 return FalseVal;
3779 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
3780 return TrueVal;
3782 if (Value *V =
3783 simplifySelectWithICmpCond(CondVal, TrueVal, FalseVal, Q, MaxRecurse))
3784 return V;
3786 return nullptr;
3787 }
3789 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
3790 const SimplifyQuery &Q) {
3791 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
3794 /// Given operands for an GetElementPtrInst, see if we can fold the result.
3795 /// If not, this returns null.
3796 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3797 const SimplifyQuery &Q, unsigned) {
3798 // The type of the GEP pointer operand.
3799 unsigned AS =
3800 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
3802 // getelementptr P -> P.
3803 if (Ops.size() == 1)
3806 // Compute the (pointer) type returned by the GEP instruction.
3807 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
3808 Type *GEPTy = PointerType::get(LastType, AS);
3809 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
3810 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3811 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType()))
3812 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3814 if (isa<UndefValue>(Ops[0]))
3815 return UndefValue::get(GEPTy);
3817 if (Ops.size() == 2) {
3818 // getelementptr P, 0 -> P.
3819 if (match(Ops[1], m_Zero()))
3822 Type *Ty = SrcTy;
3823 if (Ty->isSized()) {
3824 Value *P;
3825 uint64_t C;
3826 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
3827 // getelementptr P, N -> P if P points to a type of zero size.
3828 if (TyAllocSize == 0)
3831 // The following transforms are only safe if the ptrtoint cast
3832 // doesn't truncate the pointers.
3833 if (Ops[1]->getType()->getScalarSizeInBits() ==
3834 Q.DL.getPointerSizeInBits(AS)) {
3835 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
3836 if (match(P, m_Zero()))
3837 return Constant::getNullValue(GEPTy);
3838 Value *Temp;
3839 if (match(P, m_PtrToInt(m_Value(Temp))))
3840 if (Temp->getType() == GEPTy)
3841 return Temp;
3842 return nullptr;
3843 };
3845 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
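// Illustrative example (assumed IR, 64-bit pointers) for the size-1 case:
//   %pi = ptrtoint i8* %p to i64
//   %vi = ptrtoint i8* %v to i64
//   %d  = sub i64 %pi, %vi
//   %g  = getelementptr i8, i8* %v, i64 %d   ; simplifies to %p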
3846 if (TyAllocSize == 1 &&
3847 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
3848 if (Value *R = PtrToIntOrZero(P))
3851 // getelementptr V, (ashr (sub P, V), C) -> R
3852 // if P points to a type of size 1 << C.
3853 if (match(Ops[1],
3854 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3855 m_ConstantInt(C))) &&
3856 TyAllocSize == 1ULL << C)
3857 if (Value *R = PtrToIntOrZero(P))
3858 return R;
3860 // getelementptr V, (sdiv (sub P, V), C) -> R
3861 // if P points to a type of size C.
3862 if (match(Ops[1],
3863 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3864 m_SpecificInt(TyAllocSize))))
3865 if (Value *R = PtrToIntOrZero(P))
3866 return R;
3867 }
3868 }
3869 }
3871 if (Q.DL.getTypeAllocSize(LastType) == 1 &&
3872 all_of(Ops.slice(1).drop_back(1),
3873 [](Value *Idx) { return match(Idx, m_Zero()); })) {
3874 unsigned PtrWidth =
3875 Q.DL.getPointerSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
3876 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == PtrWidth) {
3877 APInt BasePtrOffset(PtrWidth, 0);
3878 Value *StrippedBasePtr =
3879 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
3880 BasePtrOffset);
3882 // gep (gep V, C), (sub 0, V) -> C
3883 if (match(Ops.back(),
3884 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) {
3885 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
3886 return ConstantExpr::getIntToPtr(CI, GEPTy);
3888 // gep (gep V, C), (xor V, -1) -> C-1
3889 if (match(Ops.back(),
3890 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) {
3891 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
3892 return ConstantExpr::getIntToPtr(CI, GEPTy);
3897 // Check to see if this is constant foldable.
3898 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3899 if (!isa<Constant>(Ops[i]))
3900 return nullptr;
3902 return ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
3903 Ops.slice(1));
3904 }
3906 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3907 const SimplifyQuery &Q) {
3908 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit);
3911 /// Given operands for an InsertValueInst, see if we can fold the result.
3912 /// If not, this returns null.
3913 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
3914 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q,
3916 if (Constant *CAgg = dyn_cast<Constant>(Agg))
3917 if (Constant *CVal = dyn_cast<Constant>(Val))
3918 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
3920 // insertvalue x, undef, n -> x
3921 if (match(Val, m_Undef()))
3924 // insertvalue x, (extractvalue y, n), n
3925 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
3926 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
3927 EV->getIndices() == Idxs) {
3928 // insertvalue undef, (extractvalue y, n), n -> y
3929 if (match(Agg, m_Undef()))
3930 return EV->getAggregateOperand();
3932 // insertvalue y, (extractvalue y, n), n -> y
3933 if (Agg == EV->getAggregateOperand())
3940 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
3941 ArrayRef<unsigned> Idxs,
3942 const SimplifyQuery &Q) {
3943 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
3946 /// Given operands for an ExtractValueInst, see if we can fold the result.
3947 /// If not, this returns null.
3948 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3949 const SimplifyQuery &, unsigned) {
3950 if (auto *CAgg = dyn_cast<Constant>(Agg))
3951 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
3953 // extractvalue x, (insertvalue y, elt, n), n -> elt
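// Illustrative example (assumed IR):
//   %agg = insertvalue {i32, i32} %y, i32 %elt, 1
//   %e   = extractvalue {i32, i32} %agg, 1    ; simplifies to %elt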
3954 unsigned NumIdxs = Idxs.size();
3955 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
3956 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
3957 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
3958 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
3959 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
3960 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
3961 Idxs.slice(0, NumCommonIdxs)) {
3962 if (NumIdxs == NumInsertValueIdxs)
3963 return IVI->getInsertedValueOperand();
3971 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3972 const SimplifyQuery &Q) {
3973 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
3976 /// Given operands for an ExtractElementInst, see if we can fold the result.
3977 /// If not, this returns null.
3978 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &,
3980 if (auto *CVec = dyn_cast<Constant>(Vec)) {
3981 if (auto *CIdx = dyn_cast<Constant>(Idx))
3982 return ConstantFoldExtractElementInstruction(CVec, CIdx);
3984 // The index is not relevant if our vector is a splat.
3985 if (auto *Splat = CVec->getSplatValue())
3988 if (isa<UndefValue>(Vec))
3989 return UndefValue::get(Vec->getType()->getVectorElementType());
3992 // If extracting a specified index from the vector, see if we can recursively
3993 // find a previously computed scalar that was inserted into the vector.
3994 if (auto *IdxC = dyn_cast<ConstantInt>(Idx))
3995 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
4001 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
4002 const SimplifyQuery &Q) {
4003 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
4006 /// See if we can fold the given phi. If not, returns null.
4007 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) {
4008 // If all of the PHI's incoming values are the same then replace the PHI node
4009 // with the common value.
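// Illustrative example (assumed IR):
//   %p = phi i32 [ %x, %bb1 ], [ undef, %bb2 ], [ %x, %bb3 ]
// simplifies to %x, provided %x dominates the phi's block (checked below).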
4010 Value *CommonValue = nullptr;
4011 bool HasUndefInput = false;
4012 for (Value *Incoming : PN->incoming_values()) {
4013 // If the incoming value is the phi node itself, it can safely be skipped.
4014 if (Incoming == PN) continue;
4015 if (isa<UndefValue>(Incoming)) {
4016 // Remember that we saw an undef value, but otherwise ignore them.
4017 HasUndefInput = true;
4020 if (CommonValue && Incoming != CommonValue)
4021 return nullptr; // Not the same, bail out.
4022 CommonValue = Incoming;
4025 // If CommonValue is null then all of the incoming values were either undef or
4026 // equal to the phi node itself.
4027 if (!CommonValue)
4028 return UndefValue::get(PN->getType());
4030 // If we have a PHI node like phi(X, undef, X), where X is defined by some
4031 // instruction, we cannot return X as the result of the PHI node unless it
4032 // dominates the PHI block.
4033 if (HasUndefInput)
4034 return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
4036 return CommonValue;
4037 }
4039 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
4040 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) {
4041 if (auto *C = dyn_cast<Constant>(Op))
4042 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
4044 if (auto *CI = dyn_cast<CastInst>(Op)) {
4045 auto *Src = CI->getOperand(0);
4046 Type *SrcTy = Src->getType();
4047 Type *MidTy = CI->getType();
4048 Type *DstTy = Ty;
4049 if (Src->getType() == Ty) {
4050 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
4051 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
4052 Type *SrcIntPtrTy =
4053 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
4054 Type *MidIntPtrTy =
4055 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
4056 Type *DstIntPtrTy =
4057 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
4058 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
4059 SrcIntPtrTy, MidIntPtrTy,
4060 DstIntPtrTy) == Instruction::BitCast)
4061 return Src;
4062 }
4063 }
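// Illustrative example (assumed IR) of an eliminable cast pair handled above:
//   %b = bitcast i8* %p to i32*
//   %c = bitcast i32* %b to i8*   ; simplifies to %p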
4066 if (CastOpc == Instruction::BitCast)
4067 if (Op->getType() == Ty)
4068 return Op;
4070 return nullptr;
4071 }
4073 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
4074 const SimplifyQuery &Q) {
4075 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
4078 /// For the given destination element of a shuffle, peek through shuffles to
4079 /// match a root vector source operand that contains that element in the same
4080 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
4081 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
4082 int MaskVal, Value *RootVec,
4083 unsigned MaxRecurse) {
4084 if (!MaxRecurse--)
4085 return nullptr;
4087 // Bail out if any mask value is undefined. That kind of shuffle may be
4088 // simplified further based on demanded bits or other folds.
4089 if (MaskVal == -1)
4090 return nullptr;
4092 // The mask value chooses which source operand we need to look at next.
4093 int InVecNumElts = Op0->getType()->getVectorNumElements();
4094 int RootElt = MaskVal;
4095 Value *SourceOp = Op0;
4096 if (MaskVal >= InVecNumElts) {
4097 RootElt = MaskVal - InVecNumElts;
4098 SourceOp = Op1;
4099 }
4101 // If the source operand is a shuffle itself, look through it to find the
4102 // matching root vector.
4103 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
4104 return foldIdentityShuffles(
4105 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
4106 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
4109 // TODO: Look through bitcasts? What if the bitcast changes the vector element
4110 // size?
4112 // The source operand is not a shuffle. Initialize the root vector value for
4113 // this shuffle if that has not been done yet.
4114 if (!RootVec)
4115 RootVec = SourceOp;
4117 // Give up as soon as a source operand does not match the existing root value.
4118 if (RootVec != SourceOp)
4121 // The element must be coming from the same lane in the source vector
4122 // (although it may have crossed lanes in intermediate shuffles).
4123 if (RootElt != DestElt)
4129 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
4130 Type *RetTy, const SimplifyQuery &Q,
4131 unsigned MaxRecurse) {
4132 if (isa<UndefValue>(Mask))
4133 return UndefValue::get(RetTy);
4135 Type *InVecTy = Op0->getType();
4136 unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
4137 unsigned InVecNumElts = InVecTy->getVectorNumElements();
4139 SmallVector<int, 32> Indices;
4140 ShuffleVectorInst::getShuffleMask(Mask, Indices);
4141 assert(MaskNumElts == Indices.size() &&
4142 "Size of Indices not same as number of mask elements?");
4144 // Canonicalization: If mask does not select elements from an input vector,
4145 // replace that input vector with undef.
4146 bool MaskSelects0 = false, MaskSelects1 = false;
4147 for (unsigned i = 0; i != MaskNumElts; ++i) {
4148 if (Indices[i] == -1)
4149 continue;
4150 if ((unsigned)Indices[i] < InVecNumElts)
4151 MaskSelects0 = true;
4152 else
4153 MaskSelects1 = true;
4154 }
4155 if (!MaskSelects0)
4156 Op0 = UndefValue::get(InVecTy);
4157 if (!MaskSelects1)
4158 Op1 = UndefValue::get(InVecTy);
4160 auto *Op0Const = dyn_cast<Constant>(Op0);
4161 auto *Op1Const = dyn_cast<Constant>(Op1);
4163 // If all operands are constant, constant fold the shuffle.
4164 if (Op0Const && Op1Const)
4165 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
4167 // Canonicalization: if only one input vector is constant, it shall be the
4168 // second one.
4169 if (Op0Const && !Op1Const) {
4170 std::swap(Op0, Op1);
4171 ShuffleVectorInst::commuteShuffleMask(Indices, InVecNumElts);
4174 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
4175 // value type is same as the input vectors' type.
4176 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
4177 if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
4178 OpShuf->getMask()->getSplatValue())
4181 // Don't fold a shuffle with undef mask elements. This may get folded in a
4182 // better way using demanded bits or other analysis.
4183 // TODO: Should we allow this?
4184 if (find(Indices, -1) != Indices.end())
4187 // Check if every element of this shuffle can be mapped back to the
4188 // corresponding element of a single root vector. If so, we don't need this
4189 // shuffle. This handles simple identity shuffles as well as chains of
4190 // shuffles that may widen/narrow and/or move elements across lanes and back.
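// Illustrative example (assumed IR): a shuffle chain that routes every lane
// back to its original position simplifies to the original vector:
//   %r = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %s = shufflevector <4 x i32> %r, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// %s simplifies to %v.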
4191 Value *RootVec = nullptr;
4192 for (unsigned i = 0; i != MaskNumElts; ++i) {
4193 // Note that recursion is limited for each vector element, so if any element
4194 // exceeds the limit, this will fail to simplify.
4195 RootVec =
4196 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
4198 // We can't replace a widening/narrowing shuffle with one of its operands.
4199 if (!RootVec || RootVec->getType() != RetTy)
4200 return nullptr;
4201 }
4203 return RootVec;
4204 }
4205 /// Given operands for a ShuffleVectorInst, fold the result or return null.
4206 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
4207 Type *RetTy, const SimplifyQuery &Q) {
4208 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
4211 //=== Helper functions for higher up the class hierarchy.
4213 /// Given operands for a BinaryOperator, see if we can fold the result.
4214 /// If not, this returns null.
4215 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4216 const SimplifyQuery &Q, unsigned MaxRecurse) {
4217 switch (Opcode) {
4218 case Instruction::Add:
4219 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
4220 case Instruction::FAdd:
4221 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4222 case Instruction::Sub:
4223 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
4224 case Instruction::FSub:
4225 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4226 case Instruction::Mul:
4227 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
4228 case Instruction::FMul:
4229 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4230 case Instruction::SDiv:
4231 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
4232 case Instruction::UDiv:
4233 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
4234 case Instruction::FDiv:
4235 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4236 case Instruction::SRem:
4237 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
4238 case Instruction::URem:
4239 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
4240 case Instruction::FRem:
4241 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4242 case Instruction::Shl:
4243 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
4244 case Instruction::LShr:
4245 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
4246 case Instruction::AShr:
4247 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
4248 case Instruction::And:
4249 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
4250 case Instruction::Or:
4251 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
4252 case Instruction::Xor:
4253 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
4254 default:
4255 llvm_unreachable("Unexpected opcode");
4256 }
4257 }
4259 /// Given operands for a BinaryOperator, see if we can fold the result.
4260 /// If not, this returns null.
4261 /// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
4262 /// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp.
4263 static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4264 const FastMathFlags &FMF, const SimplifyQuery &Q,
4265 unsigned MaxRecurse) {
4266 switch (Opcode) {
4267 case Instruction::FAdd:
4268 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
4269 case Instruction::FSub:
4270 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
4271 case Instruction::FMul:
4272 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
4273 case Instruction::FDiv:
4274 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
4275 default:
4276 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
4277 }
4278 }
4280 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4281 const SimplifyQuery &Q) {
4282 return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
4285 Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4286 FastMathFlags FMF, const SimplifyQuery &Q) {
4287 return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
4290 /// Given operands for a CmpInst, see if we can fold the result.
4291 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4292 const SimplifyQuery &Q, unsigned MaxRecurse) {
4293 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
4294 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
4295 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4298 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4299 const SimplifyQuery &Q) {
4300 return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
4303 static bool IsIdempotent(Intrinsic::ID ID) {
4304 switch (ID) {
4305 default: return false;
4307 // Unary idempotent: f(f(x)) = f(x)
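// Illustrative example (assumed IR): for an idempotent intrinsic such as fabs,
//   %a = call double @llvm.fabs.f64(double %x)
//   %b = call double @llvm.fabs.f64(double %a)   ; simplifies to %a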
4308 case Intrinsic::fabs:
4309 case Intrinsic::floor:
4310 case Intrinsic::ceil:
4311 case Intrinsic::trunc:
4312 case Intrinsic::rint:
4313 case Intrinsic::nearbyint:
4314 case Intrinsic::round:
4315 return true;
4316 }
4317 }
4319 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
4320 const DataLayout &DL) {
4321 GlobalValue *PtrSym;
4322 APInt PtrOffset;
4323 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
4324 return nullptr;
4326 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
4327 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
4328 Type *Int32PtrTy = Int32Ty->getPointerTo();
4329 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
4331 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
4332 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
4335 uint64_t OffsetInt = OffsetConstInt->getSExtValue();
4336 if (OffsetInt % 4 != 0)
4339 Constant *C = ConstantExpr::getGetElementPtr(
4340 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
4341 ConstantInt::get(Int64Ty, OffsetInt / 4));
4342 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
4343 if (!Loaded)
4344 return nullptr;
4346 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
4347 if (!LoadedCE)
4348 return nullptr;
4350 if (LoadedCE->getOpcode() == Instruction::Trunc) {
4351 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
4352 if (!LoadedCE)
4353 return nullptr;
4354 }
4356 if (LoadedCE->getOpcode() != Instruction::Sub)
4357 return nullptr;
4359 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
4360 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
4362 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
4364 Constant *LoadedRHS = LoadedCE->getOperand(1);
4365 GlobalValue *LoadedRHSSym;
4366 APInt LoadedRHSOffset;
4367 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
4368 DL) ||
4369 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
4370 return nullptr;
4372 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
4375 static bool maskIsAllZeroOrUndef(Value *Mask) {
4376 auto *ConstMask = dyn_cast<Constant>(Mask);
4377 if (!ConstMask)
4378 return false;
4379 if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
4380 return true;
4381 for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
4382 ++I) {
4383 if (auto *MaskElt = ConstMask->getAggregateElement(I))
4384 if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
4385 continue;
4386 return false;
4387 }
4388 return true;
4389 }
4391 template <typename IterTy>
4392 static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
4393 const SimplifyQuery &Q, unsigned MaxRecurse) {
4394 Intrinsic::ID IID = F->getIntrinsicID();
4395 unsigned NumOperands = std::distance(ArgBegin, ArgEnd);
4398 if (NumOperands == 1) {
4399 // Perform idempotent optimizations
4400 if (IsIdempotent(IID)) {
4401 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin)) {
4402 if (II->getIntrinsicID() == IID)
4403 return II;
4404 }
4405 }
4407 switch (IID) {
4408 case Intrinsic::fabs: {
4409 if (SignBitMustBeZero(*ArgBegin, Q.TLI))
4410 return *ArgBegin;
4411 return nullptr;
4412 }
4413 default:
4414 return nullptr;
4415 }
4416 }
4419 if (NumOperands == 2) {
4420 Value *LHS = *ArgBegin;
4421 Value *RHS = *(ArgBegin + 1);
4422 Type *ReturnType = F->getReturnType();
4424 switch (IID) {
4425 case Intrinsic::usub_with_overflow:
4426 case Intrinsic::ssub_with_overflow: {
4427 // X - X -> { 0, false }
4428 if (LHS == RHS)
4429 return Constant::getNullValue(ReturnType);
4431 // X - undef -> undef
4432 // undef - X -> undef
4433 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
4434 return UndefValue::get(ReturnType);
4438 case Intrinsic::uadd_with_overflow:
4439 case Intrinsic::sadd_with_overflow: {
4440 // X + undef -> undef
4441 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
4442 return UndefValue::get(ReturnType);
4446 case Intrinsic::umul_with_overflow:
4447 case Intrinsic::smul_with_overflow: {
4448 // 0 * X -> { 0, false }
4449 // X * 0 -> { 0, false }
4450 if (match(LHS, m_Zero()) || match(RHS, m_Zero()))
4451 return Constant::getNullValue(ReturnType);
4453 // undef * X -> { 0, false }
4454 // X * undef -> { 0, false }
4455 if (match(LHS, m_Undef()) || match(RHS, m_Undef()))
4456 return Constant::getNullValue(ReturnType);
4458 return nullptr;
4459 }
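// Illustrative example (assumed IR):
//   %r = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 0)
// simplifies to zeroinitializer, i.e. { i32 0, i1 false }.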
4460 case Intrinsic::load_relative: {
4461 Constant *C0 = dyn_cast<Constant>(LHS);
4462 Constant *C1 = dyn_cast<Constant>(RHS);
4463 if (C0 && C1)
4464 return SimplifyRelativeLoad(C0, C1, Q.DL);
4465 return nullptr;
4466 }
4467 default:
4468 return nullptr;
4469 }
4470 }
4472 // Simplify calls to llvm.masked.load.*
4473 switch (IID) {
4474 case Intrinsic::masked_load: {
4475 Value *MaskArg = ArgBegin[2];
4476 Value *PassthruArg = ArgBegin[3];
4477 // If the mask is all zeros or undef, the "passthru" argument is the result.
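// Illustrative example (assumed IR): a masked load whose mask is all zero
// returns its passthru operand:
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4,
//                                                       <4 x i1> zeroinitializer,
//                                                       <4 x i32> %t)
// simplifies to %t.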
4478 if (maskIsAllZeroOrUndef(MaskArg))
4479 return PassthruArg;
4480 return nullptr;
4481 }
4482 default:
4483 return nullptr;
4484 }
4485 }
4487 template <typename IterTy>
4488 static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
4489 const SimplifyQuery &Q, unsigned MaxRecurse) {
4490 Type *Ty = V->getType();
4491 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
4492 Ty = PTy->getElementType();
4493 FunctionType *FTy = cast<FunctionType>(Ty);
4495 // call undef -> undef
4496 // call null -> undef
4497 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4498 return UndefValue::get(FTy->getReturnType());
4500 Function *F = dyn_cast<Function>(V);
4501 if (!F)
4502 return nullptr;
4504 if (F->isIntrinsic())
4505 if (Value *Ret = SimplifyIntrinsic(F, ArgBegin, ArgEnd, Q, MaxRecurse))
4508 if (!canConstantFoldCallTo(F))
4511 SmallVector<Constant *, 4> ConstantArgs;
4512 ConstantArgs.reserve(ArgEnd - ArgBegin);
4513 for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
4514 Constant *C = dyn_cast<Constant>(*I);
4515 if (!C)
4516 return nullptr;
4517 ConstantArgs.push_back(C);
4518 }
4520 return ConstantFoldCall(F, ConstantArgs, Q.TLI);
4523 Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
4524 User::op_iterator ArgEnd, const SimplifyQuery &Q) {
4525 return ::SimplifyCall(V, ArgBegin, ArgEnd, Q, RecursionLimit);
4528 Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
4529 const SimplifyQuery &Q) {
4530 return ::SimplifyCall(V, Args.begin(), Args.end(), Q, RecursionLimit);
4533 /// See if we can compute a simplified version of this instruction.
4534 /// If not, this returns null.
4536 Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
4537 OptimizationRemarkEmitter *ORE) {
4538 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
4539 Value *Result;
4541 switch (I->getOpcode()) {
4542 default:
4543 Result = ConstantFoldInstruction(I, Q.DL, Q.TLI);
4544 break;
4545 case Instruction::FAdd:
4546 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
4547 I->getFastMathFlags(), Q);
4549 case Instruction::Add:
4550 Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
4551 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4552 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
4554 case Instruction::FSub:
4555 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
4556 I->getFastMathFlags(), Q);
4558 case Instruction::Sub:
4559 Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
4560 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4561 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
4563 case Instruction::FMul:
4564 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
4565 I->getFastMathFlags(), Q);
4567 case Instruction::Mul:
4568 Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q);
4570 case Instruction::SDiv:
4571 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q);
4573 case Instruction::UDiv:
4574 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q);
4576 case Instruction::FDiv:
4577 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
4578 I->getFastMathFlags(), Q);
4580 case Instruction::SRem:
4581 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q);
4583 case Instruction::URem:
4584 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q);
4586 case Instruction::FRem:
4587 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
4588 I->getFastMathFlags(), Q);
4590 case Instruction::Shl:
4591 Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
4592 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4593 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
4595 case Instruction::LShr:
4596 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
4597 cast<BinaryOperator>(I)->isExact(), Q);
4599 case Instruction::AShr:
4600 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
4601 cast<BinaryOperator>(I)->isExact(), Q);
4603 case Instruction::And:
4604 Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q);
4606 case Instruction::Or:
4607 Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q);
4609 case Instruction::Xor:
4610 Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q);
4612 case Instruction::ICmp:
4613 Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
4614 I->getOperand(0), I->getOperand(1), Q);
4616 case Instruction::FCmp:
4617 Result =
4618 SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0),
4619 I->getOperand(1), I->getFastMathFlags(), Q);
4621 case Instruction::Select:
4622 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
4623 I->getOperand(2), Q);
4625 case Instruction::GetElementPtr: {
4626 SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end());
4627 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
4628 Ops, Q);
4629 break;
4630 }
4631 case Instruction::InsertValue: {
4632 InsertValueInst *IV = cast<InsertValueInst>(I);
4633 Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
4634 IV->getInsertedValueOperand(),
4635 IV->getIndices(), Q);
4638 case Instruction::ExtractValue: {
4639 auto *EVI = cast<ExtractValueInst>(I);
4640 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
4641 EVI->getIndices(), Q);
4644 case Instruction::ExtractElement: {
4645 auto *EEI = cast<ExtractElementInst>(I);
4646 Result = SimplifyExtractElementInst(EEI->getVectorOperand(),
4647 EEI->getIndexOperand(), Q);
4650 case Instruction::ShuffleVector: {
4651 auto *SVI = cast<ShuffleVectorInst>(I);
4652 Result = SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
4653 SVI->getMask(), SVI->getType(), Q);
4656 case Instruction::PHI:
4657 Result = SimplifyPHINode(cast<PHINode>(I), Q);
4659 case Instruction::Call: {
4660 CallSite CS(cast<CallInst>(I));
4661 Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(), Q);
4664 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
4665 #include "llvm/IR/Instruction.def"
4666 #undef HANDLE_CAST_INST
4667 Result =
4668 SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q);
4669 break;
4670 case Instruction::Alloca:
4671 // No simplifications for Alloca and it can't be constant folded.
4672 Result = nullptr;
4673 break;
4674 }
4676 // In general, it is possible for computeKnownBits to determine all bits in a
4677 // value even when the operands are not all constants.
4678 if (!Result && I->getType()->isIntOrIntVectorTy()) {
4679 KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
4680 if (Known.isConstant())
4681 Result = ConstantInt::get(I->getType(), Known.getConstant());
4684 /// If called on unreachable code, the above logic may report that the
4685 /// instruction simplified to itself. Make life easier for users by
4686 /// detecting that case here, returning a safe value instead.
4687 return Result == I ? UndefValue::get(I->getType()) : Result;
4690 /// \brief Implementation of recursive simplification through an instruction's
4693 /// This is the common implementation of the recursive simplification routines.
4694 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
4695 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
4696 /// instructions to process and attempt to simplify it using
4697 /// InstructionSimplify.
4699 /// This routine returns 'true' only when *it* simplifies something. The passed
4700 /// in simplified value does not count toward this.
4701 static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
4702 const TargetLibraryInfo *TLI,
4703 const DominatorTree *DT,
4704 AssumptionCache *AC) {
4705 bool Simplified = false;
4706 SmallSetVector<Instruction *, 8> Worklist;
4707 const DataLayout &DL = I->getModule()->getDataLayout();
4709 // If we have an explicit value to collapse to, do that round of the
4710 // simplification loop by hand initially.
4711 if (SimpleV) {
4712 for (User *U : I->users())
4713 if (U != I)
4714 Worklist.insert(cast<Instruction>(U));
4716 // Replace the instruction with its simplified value.
4717 I->replaceAllUsesWith(SimpleV);
4719 // Gracefully handle edge cases where the instruction is not wired into any
4720 // parent block.
4721 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
4722 !I->mayHaveSideEffects())
4723 I->eraseFromParent();
4724 } else {
4725 Worklist.insert(I);
4726 }
4728 // Note that we must test the size on each iteration, the worklist can grow.
4729 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
4730 I = Worklist[Idx];
4732 // See if this instruction simplifies.
4733 SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
4734 if (!SimpleV)
4735 continue;
4737 Simplified = true;
4739 // Stash away all the uses of the old instruction so we can check them for
4740 // recursive simplifications after a RAUW. This is cheaper than checking all
4741 // uses of To on the recursive step in most cases.
4742 for (User *U : I->users())
4743 Worklist.insert(cast<Instruction>(U));
4745 // Replace the instruction with its simplified value.
4746 I->replaceAllUsesWith(SimpleV);
4748 // Gracefully handle edge cases where the instruction is not wired into any
4750 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
4751 !I->mayHaveSideEffects())
4752 I->eraseFromParent();
4753 }
4755 return Simplified;
4756 }
4757 bool llvm::recursivelySimplifyInstruction(Instruction *I,
4758 const TargetLibraryInfo *TLI,
4759 const DominatorTree *DT,
4760 AssumptionCache *AC) {
4761 return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
4764 bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
4765 const TargetLibraryInfo *TLI,
4766 const DominatorTree *DT,
4767 AssumptionCache *AC) {
4768 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
4769 assert(SimpleV && "Must provide a simplified value.");
4770 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
4771 }
4773 namespace llvm {
4774 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
4775 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
4776 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
4777 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
4778 auto *TLI = TLIWP ? &TLIWP->getTLI() : nullptr;
4779 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
4780 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
4781 return {F.getParent()->getDataLayout(), TLI, DT, AC};
4784 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
4785 const DataLayout &DL) {
4786 return {DL, &AR.TLI, &AR.DT, &AR.AC};
4789 template <class T, class... TArgs>
4790 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
4791 Function &F) {
4792 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
4793 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
4794 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
4795 return {F.getParent()->getDataLayout(), TLI, DT, AC};
4796 }
4797 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
4798 Function &);
4799 }