//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: this is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"
enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");
static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                              const SimplifyQuery &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
                               const SimplifyQuery &, unsigned);
static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
                              unsigned);
/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) {
  return ConstantInt::getFalse(Ty);
}

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) {
  return ConstantInt::getTrue(Ty);
}
/// Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}
/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getFunction())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent() == &I->getFunction()->getEntryBlock() &&
      !isa<InvokeInst>(I))
    return true;

  return false;
}
/// Simplify "A op (B op' C)" by distributing op over op', turning it into
/// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
/// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
/// Returns the simplified value, or null if no simplification was performed.
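///
/// An illustrative case (operand values invented for illustration): with
/// "and" distributing over "or" on i16, "((P & 15) | (Q & 240)) & 255"
/// expands to "((P & 15) & 255) | ((Q & 240) & 255)"; both halves simplify
/// back to the original operands, so the whole expression folds to
/// "(P & 15) | (Q & 240)".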
static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
                          Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Check whether the expression has the form "(A op' B) op C".
  if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
    if (Op0->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op C) op' (B op C)".
      Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
      // Do "A op C" and "B op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
          if ((L == A && R == B) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == B && R == A)) {
            ++NumExpand;
            return LHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  // Check whether the expression has the form "A op (B op' C)".
  if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
    if (Op1->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op B) op' (A op C)".
      Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
      // Do "A op B" and "A op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
          if ((L == B && R == C) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == C && R == B)) {
            ++NumExpand;
            return RHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  return nullptr;
}
/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
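///
/// An illustrative case (invented for illustration): "(X & Y) & X"
/// reassociates to "(X & X) & Y", where "X & X" simplifies to "X", so the
/// existing value "X & Y" is returned.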
static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B) return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B) return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A) return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C) return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}
/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
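///
/// An illustrative case (invented for illustration):
/// "urem (select i1 %c, i32 4, i32 8), 4" evaluates to 0 on both arms of
/// the select, so the whole expression folds to 0.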
static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && isa<UndefValue>(TV))
    return FV;
  if (FV && isa<UndefValue>(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}
/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
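///
/// An illustrative case (invented for illustration):
/// "icmp ult (select i1 %c, i32 1, i32 2), 4" compares true on both arms of
/// the select, so it folds to true.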
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
  if (TCmp == Cond) {
    // It not only simplified, it simplified to the select condition. Replace
    // it with 'true'.
    TCmp = getTrue(Cond->getType());
  } else if (!TCmp) {
    // It didn't simplify. However if "cmp TV, RHS" is equal to the select
    // condition then we can replace it with 'true'. Otherwise give up.
    if (!isSameCompare(Cond, Pred, TV, RHS))
      return nullptr;
    TCmp = getTrue(Cond->getType());
  }

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse);
  if (FCmp == Cond) {
    // It not only simplified, it simplified to the select condition. Replace
    // it with 'false'.
    FCmp = getFalse(Cond->getType());
  } else if (!FCmp) {
    // It didn't simplify. However if "cmp FV, RHS" is equal to the select
    // condition then we can replace it with 'false'. Otherwise give up.
    if (!isSameCompare(Cond, Pred, FV, RHS))
      return nullptr;
    FCmp = getFalse(Cond->getType());
  }

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
    return nullptr;
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  if (match(FCmp, m_Zero()))
    if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()))
    if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V =
            SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
                            Q, MaxRecurse))
      return V;

  return nullptr;
}
/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
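///
/// An illustrative case (invented for illustration):
/// "udiv (phi i32 [ 8, ... ], [ 16, ... ]), 32" evaluates to 0 for every
/// incoming value, so it folds to 0.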
static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = PI == LHS ?
      SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
      SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
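///
/// An illustrative case (invented for illustration):
/// "icmp ult (phi i32 [ 1, ... ], [ 2, ... ]), 4" compares true for every
/// incoming value, so it folds to true.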
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the compare on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}
/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}
/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
///
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
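///
/// An illustrative case (invented for illustration): given
/// "V = getelementptr inbounds i8, i8* %p, i64 4", this returns the constant
/// 4 and leaves V pointing at %p.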
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                bool AllowNonInbounds = false) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
  APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if ((!AllowNonInbounds && !GEP->isInBounds()) ||
          !GEP->accumulateConstantOffset(DL, Offset))
        break;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        break;
      V = GA->getAliasee();
    } else {
      if (auto CS = CallSite(V))
        if (Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }
      break;
    }
    assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
  if (V->getType()->isVectorTy())
    return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
                                    OffsetIntPtr);
  return OffsetIntPtr;
}
/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
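///
/// An illustrative case (invented for illustration): with
/// LHS = "getelementptr inbounds i8, i8* %p, i64 8" and
/// RHS = "getelementptr inbounds i8, i8* %p, i64 3", both strip to %p and
/// the result is the constant 5.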
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  return ConstantExpr::getSub(LHSOffset, RHSOffset);
}
/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - undef -> undef
  // undef - X -> undef
  if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (isNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (isNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) &&
      match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}
/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * undef -> 0
  // X * 0 -> 0
  if (match(Op1, m_CombineOr(m_Undef(), m_Zero())))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
      match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))   // Y * (X / Y)
    return X;

  // i1 mul -> and.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
                             Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
                                         MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
                                      MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
}
/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
  Type *Ty = Op0->getType();

  // X / undef -> undef
  // X % undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X / 0 -> undef
  // X % 0 -> undef
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return UndefValue::get(Ty);

  // If any element of a constant divisor vector is zero or undef, the whole op
  // is undef.
  auto *Op1C = dyn_cast<Constant>(Op1);
  if (Op1C && Ty->isVectorTy()) {
    unsigned NumElts = Ty->getVectorNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = Op1C->getAggregateElement(i);
      if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
        return UndefValue::get(Ty);
    }
  }

  // undef / X -> 0
  // undef % X -> 0
  if (match(Op0, m_Undef()))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If this is a boolean op (single-bit element type), we can't have
  // division-by-zero or remainder-by-zero, so assume the divisor is 1.
  if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1))
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  return nullptr;
}
/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}
/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
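///
/// An illustrative case (invented for illustration): "udiv i32 %x, %y" folds
/// to 0 whenever "%x u< %y" can be proven, and "urem i32 %x, %y" then folds
/// to %x.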
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended to
    // 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}
/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, true))
    return V;

  bool IsSigned = Opcode == Instruction::SDiv;

  // (X * Y) / Y -> X if the multiplication does not overflow.
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // If the Mul does not overflow, then we are good to go.
    if ((IsSigned && Mul->hasNoSignedWrap()) ||
        (!IsSigned && Mul->hasNoUnsignedWrap()))
      return X;
    // If X has the form X = A / Y, then X * Y cannot overflow.
    if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1)))))
      return X;
  }

  // (X rem Y) / Y -> 0
  if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Constant::getNullValue(Op0->getType());

  // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
  ConstantInt *C1, *C2;
  if (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
      match(Op1, m_ConstantInt(C2))) {
    bool Overflow;
    (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
    if (Overflow)
      return Constant::getNullValue(Op0->getType());
  }

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return Constant::getNullValue(Op0->getType());

  return nullptr;
}
/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, false))
    return V;

  // (X % Y) % Y -> X % Y
  if ((Opcode == Instruction::SRem &&
       match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (Opcode == Instruction::URem &&
       match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Op0;

  // (X << Y) % X -> 0
  if ((Opcode == Instruction::SRem &&
       match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
      (Opcode == Instruction::URem &&
       match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
    return Constant::getNullValue(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If X / Y == 0, then X % Y == X.
  if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
    return Op0;

  return nullptr;
}
/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
}
/// Returns true if a shift by \c Amount always yields undef.
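///
/// An illustrative case (invented for illustration): "shl i32 %x, 33" always
/// yields undef because the shift amount is not less than the 32-bit width.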
static bool isUndefShift(Value *Amount) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> undef because it may shift by the bitwidth.
  if (isa<UndefValue>(C))
    return true;

  // Shifting by the bitwidth or more is undefined.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
    if (CI->getValue().getLimitedValue() >=
        CI->getType()->getScalarSizeInBits())
      return true;

  // If all lanes of a vector shift are undefined the whole shift is.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
      if (!isUndefShift(C->getAggregateElement(I)))
        return false;
    return true;
  }

  return false;
}
/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X shift by 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // Fold undefined shifts.
  if (isUndefShift(Op1))
    return UndefValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (Known.One.getLimitedValue() >= Known.getBitWidth())
    return UndefValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
  if (Known.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  return nullptr;
}
/// Given operands for an LShr or AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool isExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (match(Op0, m_Undef()))
    return isExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  if (isExact) {
    KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI,
                                          Q.DT);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}
/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
    return V;

  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (match(Op0, m_Undef()))
    return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());

  // (X >> A) << A -> X
  Value *X;
  if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;

  // shl nuw i8 C, %x -> C iff C has sign bit set.
  if (isNUW && match(Op0, m_Negative()))
    return Op0;
  // NOTE: could use computeKnownBits() / LazyValueInfo,
  // but the cost-benefit analysis suggests it isn't worth it.

  return nullptr;
}

Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}
/// Given operands for an LShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // (X << A) >> A -> X
  Value *X;
  if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
    return X;

  return nullptr;
}

Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                              const SimplifyQuery &Q) {
  return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}
/// Given operands for an AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // all ones >>a X -> -1
  // Do not return Op0 because it may contain undef elements if it's a vector.
  if (match(Op0, m_AllOnes()))
    return Constant::getAllOnesValue(Op0->getType());

  // (X << A) >> A -> X
  Value *X;
  if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return Op0;

  return nullptr;
}

Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                              const SimplifyQuery &Q) {
  return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd) {
  Value *X, *Y;

  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  ICmpInst::Predicate UnsignedPred;
  if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
      ICmpInst::isUnsigned(UnsignedPred))
    ;
  else if (match(UnsignedICmp,
                 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
           ICmpInst::isUnsigned(UnsignedPred))
    UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
  else
    return nullptr;

  // X < Y && Y != 0 --> X < Y
  // X < Y || Y != 0 --> Y != 0
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
    return IsAnd ? UnsignedICmp : ZeroICmp;

  // X >= Y || Y != 0 --> true
  // X >= Y || Y == 0 --> X >= Y
  if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) {
    if (EqPred == ICmpInst::ICMP_NE)
      return getTrue(UnsignedICmp->getType());
    return UnsignedICmp;
  }

  // X < Y && Y == 0 --> false
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
      IsAnd)
    return getFalse(UnsignedICmp->getType());

  return nullptr;
}
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
  ICmpInst::Predicate Pred0, Pred1;
  Value *A, *B;
  if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
      !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
    return nullptr;

  // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
  // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
  // can eliminate Op1 from this 'and'.
  if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
    return Op0;

  // Check for any combination of predicates that are guaranteed to be disjoint.
  if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
      (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
      (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
      (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
    return getFalse(Op0->getType());

  return nullptr;
}
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
  ICmpInst::Predicate Pred0, Pred1;
  Value *A, *B;
  if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
      !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
    return nullptr;

  // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
  // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
  // can eliminate Op0 from this 'or'.
  if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
    return Op1;

  // Check for any combination of predicates that cover the entire range of
  // possibilities.
  if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
      (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
      (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
      (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
    return getTrue(Op0->getType());

  return nullptr;
}
/// Test if a pair of compares with a shared operand and 2 constants has an
/// empty set intersection, full set union, or if one compare is a superset of
/// the other.
static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                                bool IsAnd) {
  // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1).
  if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
    return nullptr;

  const APInt *C0, *C1;
  if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
      !match(Cmp1->getOperand(1), m_APInt(C1)))
    return nullptr;

  auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
  auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);

  // For and-of-compares, check if the intersection is empty:
  // (icmp X, C0) && (icmp X, C1) --> empty set --> false
  if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
    return getFalse(Cmp0->getType());

  // For or-of-compares, check if the union is full:
  // (icmp X, C0) || (icmp X, C1) --> full set --> true
  if (!IsAnd && Range0.unionWith(Range1).isFullSet())
    return getTrue(Cmp0->getType());

  // Is one range a superset of the other?
  // If this is and-of-compares, take the smaller set:
  // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
  // If this is or-of-compares, take the larger set:
  // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
  if (Range0.contains(Range1))
    return IsAnd ? Cmp1 : Cmp0;
  if (Range1.contains(Range0))
    return IsAnd ? Cmp0 : Cmp1;

  return nullptr;
}
static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                           bool IsAnd) {
  ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate();
  if (!match(Cmp0->getOperand(1), m_Zero()) ||
      !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1)
    return nullptr;

  if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ))
    return nullptr;

  // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)".
  Value *X = Cmp0->getOperand(0);
  Value *Y = Cmp1->getOperand(0);

  // If one of the compares is a masked version of a (not) null check, then
  // that compare implies the other, so we eliminate the other. Optionally, look
  // through a pointer-to-int cast to match a null check of a pointer type.

  // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0
  // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0
  // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0
  // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0
  if (match(Y, m_c_And(m_Specific(X), m_Value())) ||
      match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value())))
    return Cmp1;

  // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0
  // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0
  // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0
  // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0
  if (match(X, m_c_And(m_Specific(Y), m_Value())) ||
      match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value())))
    return Cmp0;

  return nullptr;
}
static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
  // (icmp (add V, C0), C1) & (icmp V, C0)
  ICmpInst::Predicate Pred0, Pred1;
  const APInt *C0, *C1;
  Value *V;
  if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
    return nullptr;

  if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
    return nullptr;

  auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
  if (AddInst->getOperand(1) != Op1->getOperand(1))
    return nullptr;

  Type *ITy = Op0->getType();
  bool isNSW = AddInst->hasNoSignedWrap();
  bool isNUW = AddInst->hasNoUnsignedWrap();

  const APInt Delta = *C1 - *C0;
  if (C0->isStrictlyPositive()) {
    if (Delta == 2) {
      if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
        return getFalse(ITy);
      if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
        return getFalse(ITy);
    }
    if (Delta == 1) {
      if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
        return getFalse(ITy);
      if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
        return getFalse(ITy);
    }
  }
  if (C0->getBoolValue() && isNUW) {
    if (Delta == 2)
      if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
        return getFalse(ITy);
    if (Delta == 1)
      if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
        return getFalse(ITy);
  }

  return nullptr;
}

static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
  if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
    return X;
  if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true))
    return X;

  if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
    return X;
  if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0))
    return X;

  if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
    return X;

  if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
    return X;

  if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1))
    return X;
  if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0))
    return X;

  return nullptr;
}
static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
  // (icmp (add V, C0), C1) | (icmp V, C0)
  ICmpInst::Predicate Pred0, Pred1;
  const APInt *C0, *C1;
  Value *V;
  if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
    return nullptr;

  if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
    return nullptr;

  auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
  if (AddInst->getOperand(1) != Op1->getOperand(1))
    return nullptr;

  Type *ITy = Op0->getType();
  bool isNSW = AddInst->hasNoSignedWrap();
  bool isNUW = AddInst->hasNoUnsignedWrap();

  const APInt Delta = *C1 - *C0;
  if (C0->isStrictlyPositive()) {
    if (Delta == 2) {
      if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
        return getTrue(ITy);
      if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
        return getTrue(ITy);
    }
    if (Delta == 1) {
      if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
        return getTrue(ITy);
      if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
        return getTrue(ITy);
    }
  }
  if (C0->getBoolValue() && isNUW) {
    if (Delta == 2)
      if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
        return getTrue(ITy);
    if (Delta == 1)
      if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
        return getTrue(ITy);
  }

  return nullptr;
}

static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
  if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
    return X;
  if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false))
    return X;

  if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
    return X;
  if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0))
    return X;

  if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
    return X;

  if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
    return X;

  if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1))
    return X;
  if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0))
    return X;

  return nullptr;
}
static Value *simplifyAndOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  if (LHS0->getType() != RHS0->getType())
    return nullptr;

  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
      (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
    // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
    // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
    // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
    // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
    // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
    // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
    // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
    // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
    if ((isKnownNeverNaN(LHS0) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
        (isKnownNeverNaN(LHS1) && (LHS0 == RHS0 || LHS0 == RHS1)))
      return RHS;

    // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
    // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
    // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
    // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
    // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
    // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
    // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
    // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
    if ((isKnownNeverNaN(RHS0) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
        (isKnownNeverNaN(RHS1) && (RHS0 == LHS0 || RHS0 == LHS1)))
      return LHS;
  }

  return nullptr;
}
1650 static Value *simplifyAndOrOfCmps(Value *Op0, Value *Op1, bool IsAnd) {
1651 // Look through casts of the 'and' operands to find compares.
1652 auto *Cast0 = dyn_cast<CastInst>(Op0);
1653 auto *Cast1 = dyn_cast<CastInst>(Op1);
1654 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1655 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1656 Op0 = Cast0->getOperand(0);
1657 Op1 = Cast1->getOperand(0);
1658 }
1660 Value *V = nullptr;
1661 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1662 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1663 if (ICmp0 && ICmp1)
1664 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1) :
1665 simplifyOrOfICmps(ICmp0, ICmp1);
1667 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1668 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1669 if (FCmp0 && FCmp1)
1670 V = simplifyAndOrOfFCmps(FCmp0, FCmp1, IsAnd);
1672 if (!V)
1673 return nullptr;
1674 if (!Cast0)
1675 return V;
1677 // If we looked through casts, we can only handle a constant simplification
1678 // because we are not allowed to create a cast instruction here.
1679 if (auto *C = dyn_cast<Constant>(V))
1680 return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
1682 return nullptr;
1685 /// Given operands for an And, see if we can fold the result.
1686 /// If not, this returns null.
1687 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1688 unsigned MaxRecurse) {
1689 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
1690 return C;
1692 // X & undef -> 0
1693 if (match(Op1, m_Undef()))
1694 return Constant::getNullValue(Op0->getType());
1701 if (match(Op1, m_Zero()))
1702 return Constant::getNullValue(Op0->getType());
1704 // X & -1 = X
1705 if (match(Op1, m_AllOnes()))
1706 return Op0;
1708 // A & ~A = ~A & A = 0
1709 if (match(Op0, m_Not(m_Specific(Op1))) ||
1710 match(Op1, m_Not(m_Specific(Op0))))
1711 return Constant::getNullValue(Op0->getType());
1713 // (A | ?) & A = A
1714 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
1715 return Op1;
1717 // A & (A | ?) = A
1718 if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
1719 return Op0;
1721 // A mask that only clears known zeros of a shifted value is a no-op.
1722 Value *X;
1723 const APInt *Mask;
1724 const APInt *ShAmt;
1725 if (match(Op1, m_APInt(Mask))) {
1726 // If all bits in the inverted and shifted mask are clear:
1727 // and (shl X, ShAmt), Mask --> shl X, ShAmt
1728 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
1729 (~(*Mask)).lshr(*ShAmt).isNullValue())
1730 return Op0;
1732 // If all bits in the inverted and shifted mask are clear:
1733 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
1734 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
1735 (~(*Mask)).shl(*ShAmt).isNullValue())
1736 return Op0;
1737 }
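// For example (illustrative only): for "%s = shl i8 %x, 3", the mask -8
// (0xF8) satisfies the check above, since ~0xF8 = 0x07 and 0x07 lshr 3 == 0,
// so "and i8 %s, -8" simplifies to %s.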
1739 // A & (-A) = A if A is a power of two or zero.
1740 if (match(Op0, m_Neg(m_Specific(Op1))) ||
1741 match(Op1, m_Neg(m_Specific(Op0)))) {
1742 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1743 Q.DT))
1744 return Op0;
1745 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1746 Q.DT))
1747 return Op1;
1748 }
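// For example, assuming %p is known to be a power of two or zero (say it was
// built with a bounded single-bit shift), "and i32 %p, (sub i32 0, %p)"
// simplifies to %p: x & -x isolates the lowest set bit, and a power of two
// (or zero) is exactly its own lowest set bit.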
1750 if (Value *V = simplifyAndOrOfCmps(Op0, Op1, true))
1751 return V;
1753 // Try some generic simplifications for associative operations.
1754 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
1755 MaxRecurse))
1756 return V;
1758 // And distributes over Or. Try some generic simplifications based on this.
1759 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
1760 Q, MaxRecurse))
1761 return V;
1763 // And distributes over Xor. Try some generic simplifications based on this.
1764 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
1765 Q, MaxRecurse))
1766 return V;
1768 // If the operation is with the result of a select instruction, check whether
1769 // operating on either branch of the select always yields the same value.
1770 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1771 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
1772 MaxRecurse))
1773 return V;
1775 // If the operation is with the result of a phi instruction, check whether
1776 // operating on all incoming values of the phi always yields the same value.
1777 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1778 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
1779 MaxRecurse))
1780 return V;
1782 return nullptr;
1785 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1786 return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
1789 /// Given operands for an Or, see if we can fold the result.
1790 /// If not, this returns null.
1791 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1792 unsigned MaxRecurse) {
1793 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
1794 return C;
1796 // X | undef -> -1
1797 // X | -1 = -1
1798 // Do not return Op1 because it may contain undef elements if it's a vector.
1799 if (match(Op1, m_Undef()) || match(Op1, m_AllOnes()))
1800 return Constant::getAllOnesValue(Op0->getType());
1802 // X | X = X
1803 // X | 0 = X
1804 if (Op0 == Op1 || match(Op1, m_Zero()))
1805 return Op0;
1807 // A | ~A = ~A | A = -1
1808 if (match(Op0, m_Not(m_Specific(Op1))) ||
1809 match(Op1, m_Not(m_Specific(Op0))))
1810 return Constant::getAllOnesValue(Op0->getType());
1812 // (A & ?) | A = A
1813 if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
1814 return Op1;
1816 // A | (A & ?) = A
1817 if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
1818 return Op0;
1820 // ~(A & ?) | A = -1
1821 if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
1822 return Constant::getAllOnesValue(Op1->getType());
1824 // A | ~(A & ?) = -1
1825 if (match(Op1, m_Not(m_c_And(m_Specific(Op0), m_Value()))))
1826 return Constant::getAllOnesValue(Op0->getType());
1829 // (A & ~B) | (A ^ B) -> (A ^ B)
1830 // (~B & A) | (A ^ B) -> (A ^ B)
1831 // (A & ~B) | (B ^ A) -> (B ^ A)
1832 // (~B & A) | (B ^ A) -> (B ^ A)
1833 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
1834 (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
1835 match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
1836 return Op1;
1838 // Commute the 'or' operands.
1839 // (A ^ B) | (A & ~B) -> (A ^ B)
1840 // (A ^ B) | (~B & A) -> (A ^ B)
1841 // (B ^ A) | (A & ~B) -> (B ^ A)
1842 // (B ^ A) | (~B & A) -> (B ^ A)
1843 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
1844 (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
1845 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
1846 return Op0;
1848 // (A & B) | (~A ^ B) -> (~A ^ B)
1849 // (B & A) | (~A ^ B) -> (~A ^ B)
1850 // (A & B) | (B ^ ~A) -> (B ^ ~A)
1851 // (B & A) | (B ^ ~A) -> (B ^ ~A)
1852 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1853 (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
1854 match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
1855 return Op1;
1857 // (~A ^ B) | (A & B) -> (~A ^ B)
1858 // (~A ^ B) | (B & A) -> (~A ^ B)
1859 // (B ^ ~A) | (A & B) -> (B ^ ~A)
1860 // (B ^ ~A) | (B & A) -> (B ^ ~A)
1861 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
1862 (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
1863 match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
1864 return Op0;
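// Rationale for the four xor/and groups above: A ^ B == (A & ~B) | (~A & B),
// so a disjunct such as (A & ~B) is already covered by the xor and the 'or'
// adds nothing. Likewise ~A ^ B == (A & B) | (~A & ~B), which covers (A & B).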
1866 if (Value *V = simplifyAndOrOfCmps(Op0, Op1, false))
1867 return V;
1869 // Try some generic simplifications for associative operations.
1870 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
1871 MaxRecurse))
1872 return V;
1874 // Or distributes over And. Try some generic simplifications based on this.
1875 if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
1876 MaxRecurse))
1877 return V;
1879 // If the operation is with the result of a select instruction, check whether
1880 // operating on either branch of the select always yields the same value.
1881 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1882 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
1883 MaxRecurse))
1884 return V;
1886 // (A & C1)|(B & C2)
1887 const APInt *C1, *C2;
1888 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
1889 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
1891 // (A & C1)|(B & C2)
1892 // If we have: ((V + N) & C1) | (V & C2)
1893 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
1894 // replace with V+N.
1895 Value *N;
1896 if (C2->isMask() && // C2 == 0+1+
1897 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
1898 // Add commutes, try both ways.
1899 if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1900 return A;
1901 }
1902 // Or commutes, try both ways.
1903 if (C1->isMask() && // C1 == 0+1+
1904 match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
1905 // Add commutes, try both ways.
1906 if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1907 return B;
1908 }
1909 }
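// Worked example for the fold above (illustrative, i8): take C1 = 0xF0,
// C2 = 0x0F (a low-bit mask with C2 == ~C1) and N = 32, so (N & C2) == 0:
//   ((%v + 32) & 0xF0) | (%v & 0x0F)  -->  %v + 32
// Adding 32 cannot change the low nibble of %v, so the two masked halves
// reassemble %v + 32 exactly.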
1912 // If the operation is with the result of a phi instruction, check whether
1913 // operating on all incoming values of the phi always yields the same value.
1914 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1915 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
1916 return V;
1918 return nullptr;
1921 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1922 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit);
1925 /// Given operands for a Xor, see if we can fold the result.
1926 /// If not, this returns null.
1927 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1928 unsigned MaxRecurse) {
1929 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
1932 // A ^ undef -> undef
1933 if (match(Op1, m_Undef()))
1934 return Op1;
1936 // A ^ 0 = A
1937 if (match(Op1, m_Zero()))
1938 return Op0;
1940 // A ^ A = 0
1941 if (Op0 == Op1)
1942 return Constant::getNullValue(Op0->getType());
1944 // A ^ ~A = ~A ^ A = -1
1945 if (match(Op0, m_Not(m_Specific(Op1))) ||
1946 match(Op1, m_Not(m_Specific(Op0))))
1947 return Constant::getAllOnesValue(Op0->getType());
1949 // Try some generic simplifications for associative operations.
1950 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
1951 MaxRecurse))
1952 return V;
1954 // Threading Xor over selects and phi nodes is pointless, so don't bother.
1955 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
1956 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
1957 // only if B and C are equal. If B and C are equal then (since we assume
1958 // that operands have already been simplified) "select(cond, B, C)" should
1959 // have been simplified to the common value of B and C already. Analysing
1960 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
1961 // for threading over phi nodes.
1966 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1967 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
1971 static Type *GetCompareTy(Value *Op) {
1972 return CmpInst::makeCmpResultType(Op->getType());
1975 /// Rummage around inside V looking for something equivalent to the comparison
1976 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
1977 /// Helper function for analyzing max/min idioms.
1978 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
1979 Value *LHS, Value *RHS) {
1980 SelectInst *SI = dyn_cast<SelectInst>(V);
1981 if (!SI)
1982 return nullptr;
1983 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
1984 if (!Cmp)
1985 return nullptr;
1986 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
1987 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
1988 return Cmp;
1989 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
1990 LHS == CmpRHS && RHS == CmpLHS)
1991 return Cmp;
1993 return nullptr;
1995 // A significant optimization not implemented here is assuming that alloca
1996 // addresses are not equal to incoming argument values. They don't *alias*,
1997 // as we say, but that doesn't mean they aren't equal, so we take a
1998 // conservative approach.
2000 // This is inspired in part by C++11 5.10p1:
2001 // "Two pointers of the same type compare equal if and only if they are both
2002 // null, both point to the same function, or both represent the same
2003 // address".
2005 // This is pretty permissive.
2007 // It's also partly due to C11 6.5.9p6:
2008 // "Two pointers compare equal if and only if both are null pointers, both are
2009 // pointers to the same object (including a pointer to an object and a
2010 // subobject at its beginning) or function, both are pointers to one past the
2011 // last element of the same array object, or one is a pointer to one past the
2012 // end of one array object and the other is a pointer to the start of a
2013 // different array object that happens to immediately follow the first array
2014 // object in the address space."
2016 // C11's version is more restrictive, however there's no reason why an argument
2017 // couldn't be a one-past-the-end value for a stack object in the caller and be
2018 // equal to the beginning of a stack object in the callee.
2020 // If the C and C++ standards are ever made sufficiently restrictive in this
2021 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2022 // this optimization.
2023 static Constant *
2024 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2025 const DominatorTree *DT, CmpInst::Predicate Pred,
2026 AssumptionCache *AC, const Instruction *CxtI,
2027 Value *LHS, Value *RHS) {
2028 // First, skip past any trivial no-ops.
2029 LHS = LHS->stripPointerCasts();
2030 RHS = RHS->stripPointerCasts();
2032 // A non-null pointer is not equal to a null pointer.
2033 if (llvm::isKnownNonZero(LHS, DL) && isa<ConstantPointerNull>(RHS) &&
2034 (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
2035 return ConstantInt::get(GetCompareTy(LHS),
2036 !CmpInst::isTrueWhenEqual(Pred));
2038 // We can only fold certain predicates on pointer comparisons.
2039 switch (Pred) {
2040 default:
2041 return nullptr;
2043 // Equality comparisons are easy to fold.
2044 case CmpInst::ICMP_EQ:
2045 case CmpInst::ICMP_NE:
2046 break;
2048 // We can only handle unsigned relational comparisons because 'inbounds' on
2049 // a GEP only protects against unsigned wrapping.
2050 case CmpInst::ICMP_UGT:
2051 case CmpInst::ICMP_UGE:
2052 case CmpInst::ICMP_ULT:
2053 case CmpInst::ICMP_ULE:
2054 // However, we have to switch them to their signed variants to handle
2055 // negative indices from the base pointer.
2056 Pred = ICmpInst::getSignedPredicate(Pred);
2057 break;
2058 }
2060 // Strip off any constant offsets so that we can reason about them.
2061 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2062 // here and compare base addresses like AliasAnalysis does, however there are
2063 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2064 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2065 // doesn't need to guarantee pointer inequality when it says NoAlias.
2066 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2067 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2069 // If LHS and RHS are related via constant offsets to the same base
2070 // value, we can replace it with an icmp which just compares the offsets.
2071 if (LHS == RHS)
2072 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2074 // Various optimizations for (in)equality comparisons.
2075 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2076 // Different non-empty allocations that exist at the same time have
2077 // different addresses (if the program can tell). Global variables always
2078 // exist, so they always exist during the lifetime of each other and all
2079 // allocas. Two different allocas usually have different addresses...
2081 // However, if there's an @llvm.stackrestore dynamically in between two
2082 // allocas, they may have the same address. It's tempting to reduce the
2083 // scope of the problem by only looking at *static* allocas here. That would
2084 // cover the majority of allocas while significantly reducing the likelihood
2085 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2086 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2087 // an entry block. Also, if we have a block that's not attached to a
2088 // function, we can't tell if it's "static" under the current definition.
2089 // Theoretically, this problem could be fixed by creating a new kind of
2090 // instruction kind specifically for static allocas. Such a new instruction
2091 // could be required to be at the top of the entry block, thus preventing it
2092 // from being subject to a @llvm.stackrestore. Instcombine could even
2093 // convert regular allocas into these special allocas. It'd be nifty.
2094 // However, until then, this problem remains open.
2096 // So, we'll assume that two non-empty allocas have different addresses
2099 // With all that, if the offsets are within the bounds of their allocations
2100 // (and not one-past-the-end! so we can't use inbounds!), and their
2101 // allocations aren't the same, the pointers are not equal.
2103 // Note that it's not necessary to check for LHS being a global variable
2104 // address, due to canonicalization and constant folding.
2105 if (isa<AllocaInst>(LHS) &&
2106 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2107 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2108 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2109 uint64_t LHSSize, RHSSize;
2110 if (LHSOffsetCI && RHSOffsetCI &&
2111 getObjectSize(LHS, LHSSize, DL, TLI) &&
2112 getObjectSize(RHS, RHSSize, DL, TLI)) {
2113 const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2114 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2115 if (!LHSOffsetValue.isNegative() &&
2116 !RHSOffsetValue.isNegative() &&
2117 LHSOffsetValue.ult(LHSSize) &&
2118 RHSOffsetValue.ult(RHSSize)) {
2119 return ConstantInt::get(GetCompareTy(LHS),
2120 !CmpInst::isTrueWhenEqual(Pred));
2124 // Repeat the above check but this time without depending on DataLayout
2125 // or being able to compute a precise size.
2126 if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2127 !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2128 LHSOffset->isNullValue() &&
2129 RHSOffset->isNullValue())
2130 return ConstantInt::get(GetCompareTy(LHS),
2131 !CmpInst::isTrueWhenEqual(Pred));
2134 // Even if a non-inbounds GEP occurs along the path we can still optimize
2135 // equality comparisons concerning the result. We avoid walking the whole
2136 // chain again by starting where the last calls to
2137 // stripAndComputeConstantOffsets left off and accumulate the offsets.
2138 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2139 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2140 if (LHS == RHS)
2141 return ConstantExpr::getICmp(Pred,
2142 ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2143 ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2145 // If one side of the equality comparison must come from a noalias call
2146 // (meaning a system memory allocation function), and the other side must
2147 // come from a pointer that cannot overlap with dynamically-allocated
2148 // memory within the lifetime of the current function (allocas, byval
2149 // arguments, globals), then determine the comparison result here.
2150 SmallVector<Value *, 8> LHSUObjs, RHSUObjs;
2151 GetUnderlyingObjects(LHS, LHSUObjs, DL);
2152 GetUnderlyingObjects(RHS, RHSUObjs, DL);
2154 // Is the set of underlying objects all noalias calls?
2155 auto IsNAC = [](ArrayRef<Value *> Objects) {
2156 return all_of(Objects, isNoAliasCall);
2159 // Is the set of underlying objects all things which must be disjoint from
2160 // noalias calls. For allocas, we consider only static ones (dynamic
2161 // allocas might be transformed into calls to malloc not simultaneously
2162 // live with the compared-to allocation). For globals, we exclude symbols
2163 // that might be resolved lazily to symbols in another dynamically-loaded
2164 // library (and, thus, could be malloc'ed by the implementation).
2165 auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) {
2166 return all_of(Objects, [](Value *V) {
2167 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2168 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2169 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2170 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2171 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2172 !GV->isThreadLocal();
2173 if (const Argument *A = dyn_cast<Argument>(V))
2174 return A->hasByValAttr();
2179 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2180 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2181 return ConstantInt::get(GetCompareTy(LHS),
2182 !CmpInst::isTrueWhenEqual(Pred));
2184 // Fold comparisons for non-escaping pointer even if the allocation call
2185 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2186 // dynamic allocation call could be either of the operands.
2187 Value *MI = nullptr;
2188 if (isAllocLikeFn(LHS, TLI) &&
2189 llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2190 MI = LHS;
2191 else if (isAllocLikeFn(RHS, TLI) &&
2192 llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2193 MI = RHS;
2194 // FIXME: We should also fold the compare when the pointer escapes, but the
2195 // compare dominates the pointer escape
2196 if (MI && !PointerMayBeCaptured(MI, true, true))
2197 return ConstantInt::get(GetCompareTy(LHS),
2198 CmpInst::isFalseWhenEqual(Pred));
2205 /// Fold an icmp when its operands have i1 scalar type.
2206 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2207 Value *RHS, const SimplifyQuery &Q) {
2208 Type *ITy = GetCompareTy(LHS); // The return type.
2209 Type *OpTy = LHS->getType(); // The operand type.
2210 if (!OpTy->isIntOrIntVectorTy(1))
2213 // A boolean compared to true/false can be simplified in 14 out of the 20
2214 // (10 predicates * 2 constants) possible combinations. Cases not handled here
2215 // require a 'not' of the LHS, so those must be transformed in InstCombine.
2216 if (match(RHS, m_Zero())) {
2217 switch (Pred) {
2218 case CmpInst::ICMP_NE: // X != 0 -> X
2219 case CmpInst::ICMP_UGT: // X >u 0 -> X
2220 case CmpInst::ICMP_SLT: // X <s 0 -> X
2221 return LHS;
2223 case CmpInst::ICMP_ULT: // X <u 0 -> false
2224 case CmpInst::ICMP_SGT: // X >s 0 -> false
2225 return getFalse(ITy);
2227 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2228 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2229 return getTrue(ITy);
2233 } else if (match(RHS, m_One())) {
2234 switch (Pred) {
2235 case CmpInst::ICMP_EQ: // X == 1 -> X
2236 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2237 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2238 return LHS;
2240 case CmpInst::ICMP_UGT: // X >u 1 -> false
2241 case CmpInst::ICMP_SLT: // X <s -1 -> false
2242 return getFalse(ITy);
2244 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2245 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2246 return getTrue(ITy);
2247 default:
2248 break;
2249 }
2250 }
2252 switch (Pred) {
2253 default:
2254 break;
2255 case ICmpInst::ICMP_UGE:
2256 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2257 return getTrue(ITy);
2259 case ICmpInst::ICMP_SGE:
2260 /// For signed comparison, the values for an i1 are 0 and -1
2261 /// respectively. This maps into a truth table of:
2262 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2263 /// 0 | 0 | 1 (0 >= 0) | 1
2264 /// 0 | 1 | 1 (0 >= -1) | 1
2265 /// 1 | 0 | 0 (-1 >= 0) | 0
2266 /// 1 | 1 | 1 (-1 >= -1) | 1
2267 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2268 return getTrue(ITy);
2270 case ICmpInst::ICMP_ULE:
2271 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2272 return getTrue(ITy);
2273 break;
2274 }
2276 return nullptr;
2279 /// Try hard to fold icmp with zero RHS because this is a common case.
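/// For example, "icmp ult i32 %x, 0" is trivially false and "icmp uge i32
/// %x, 0" trivially true, while "icmp eq i32 %x, 0" folds to false whenever
/// %x is known non-zero.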
2280 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2281 Value *RHS, const SimplifyQuery &Q) {
2282 if (!match(RHS, m_Zero()))
2285 Type *ITy = GetCompareTy(LHS); // The return type.
2288 llvm_unreachable("Unknown ICmp predicate!");
2289 case ICmpInst::ICMP_ULT:
2290 return getFalse(ITy);
2291 case ICmpInst::ICMP_UGE:
2292 return getTrue(ITy);
2293 case ICmpInst::ICMP_EQ:
2294 case ICmpInst::ICMP_ULE:
2295 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2296 return getFalse(ITy);
2298 case ICmpInst::ICMP_NE:
2299 case ICmpInst::ICMP_UGT:
2300 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2301 return getTrue(ITy);
2303 case ICmpInst::ICMP_SLT: {
2304 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2305 if (LHSKnown.isNegative())
2306 return getTrue(ITy);
2307 if (LHSKnown.isNonNegative())
2308 return getFalse(ITy);
2311 case ICmpInst::ICMP_SLE: {
2312 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2313 if (LHSKnown.isNegative())
2314 return getTrue(ITy);
2315 if (LHSKnown.isNonNegative() &&
2316 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2317 return getFalse(ITy);
2320 case ICmpInst::ICMP_SGE: {
2321 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2322 if (LHSKnown.isNegative())
2323 return getFalse(ITy);
2324 if (LHSKnown.isNonNegative())
2325 return getTrue(ITy);
2328 case ICmpInst::ICMP_SGT: {
2329 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2330 if (LHSKnown.isNegative())
2331 return getFalse(ITy);
2332 if (LHSKnown.isNonNegative() &&
2333 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2334 return getTrue(ITy);
2342 /// Many binary operators with a constant operand have an easy-to-compute
2343 /// range of outputs. This can be used to fold a comparison to always true or
2344 /// always false.
2345 static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper) {
2346 unsigned Width = Lower.getBitWidth();
2347 const APInt *C;
2348 switch (BO.getOpcode()) {
2349 case Instruction::Add:
2350 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
2351 // FIXME: If we have both nuw and nsw, we should reduce the range further.
2352 if (BO.hasNoUnsignedWrap()) {
2353 // 'add nuw x, C' produces [C, UINT_MAX].
2354 Lower = *C;
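// e.g. (illustrative): 'add nuw i8 %x, 16' produces [16, 255].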
2355 } else if (BO.hasNoSignedWrap()) {
2356 if (C->isNegative()) {
2357 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
2358 Lower = APInt::getSignedMinValue(Width);
2359 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
2361 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
2362 Lower = APInt::getSignedMinValue(Width) + *C;
2363 Upper = APInt::getSignedMaxValue(Width) + 1;
2369 case Instruction::And:
2370 if (match(BO.getOperand(1), m_APInt(C)))
2371 // 'and x, C' produces [0, C].
2372 Upper = *C + 1;
2373 break;
2375 case Instruction::Or:
2376 if (match(BO.getOperand(1), m_APInt(C)))
2377 // 'or x, C' produces [C, UINT_MAX].
2378 Lower = *C;
2379 break;
2381 case Instruction::AShr:
2382 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
2383 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
2384 Lower = APInt::getSignedMinValue(Width).ashr(*C);
2385 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
2386 } else if (match(BO.getOperand(0), m_APInt(C))) {
2387 unsigned ShiftAmount = Width - 1;
2388 if (!C->isNullValue() && BO.isExact())
2389 ShiftAmount = C->countTrailingZeros();
2390 if (C->isNegative()) {
2391 // 'ashr C, x' produces [C, C >> (Width-1)]
2392 Lower = *C;
2393 Upper = C->ashr(ShiftAmount) + 1;
2394 } else {
2395 // 'ashr C, x' produces [C >> (Width-1), C]
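// e.g. (illustrative): 'ashr i8 40, %x' produces [0, 40]; shifting a
// positive constant right only moves it toward zero.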
2396 Lower = C->ashr(ShiftAmount);
2397 Upper = *C + 1;
2398 }
2399 }
2400 break;
2402 case Instruction::LShr:
2403 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
2404 // 'lshr x, C' produces [0, UINT_MAX >> C].
2405 Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
2406 } else if (match(BO.getOperand(0), m_APInt(C))) {
2407 // 'lshr C, x' produces [C >> (Width-1), C].
2408 unsigned ShiftAmount = Width - 1;
2409 if (!C->isNullValue() && BO.isExact())
2410 ShiftAmount = C->countTrailingZeros();
2411 Lower = C->lshr(ShiftAmount);
2412 Upper = *C + 1;
2413 }
2414 break;
2416 case Instruction::Shl:
2417 if (match(BO.getOperand(0), m_APInt(C))) {
2418 if (BO.hasNoUnsignedWrap()) {
2419 // 'shl nuw C, x' produces [C, C << CLZ(C)]
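// e.g. (illustrative): 'shl nuw i8 3, %x' produces [3, 192], since
// 3 << 6 == 192 and any further shift would wrap.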
2420 Lower = *C;
2421 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
2422 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
2423 if (C->isNegative()) {
2424 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
2425 unsigned ShiftAmount = C->countLeadingOnes() - 1;
2426 Lower = C->shl(ShiftAmount);
2427 Upper = *C + 1;
2428 } else {
2429 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
2430 unsigned ShiftAmount = C->countLeadingZeros() - 1;
2431 Lower = *C;
2432 Upper = C->shl(ShiftAmount) + 1;
2438 case Instruction::SDiv:
2439 if (match(BO.getOperand(1), m_APInt(C))) {
2440 APInt IntMin = APInt::getSignedMinValue(Width);
2441 APInt IntMax = APInt::getSignedMaxValue(Width);
2442 if (C->isAllOnesValue()) {
2443 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
2444 // where C != -1 and C != 0 and C != 1
2445 Lower = IntMin + 1;
2446 Upper = IntMax + 1;
2447 } else if (C->countLeadingZeros() < Width - 1) {
2448 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
2449 // where C != -1 and C != 0 and C != 1
2450 Lower = IntMin.sdiv(*C);
2451 Upper = IntMax.sdiv(*C);
2452 if (Lower.sgt(Upper))
2453 std::swap(Lower, Upper);
2455 assert(Upper != Lower && "Upper part of range has wrapped!");
2457 } else if (match(BO.getOperand(0), m_APInt(C))) {
2458 if (C->isMinSignedValue()) {
2459 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
2460 Lower = *C;
2461 Upper = Lower.lshr(1) + 1;
2463 // 'sdiv C, x' produces [-|C|, |C|].
2464 Upper = C->abs() + 1;
2465 Lower = (-Upper) + 1;
2470 case Instruction::UDiv:
2471 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
2472 // 'udiv x, C' produces [0, UINT_MAX / C].
2473 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
2474 } else if (match(BO.getOperand(0), m_APInt(C))) {
2475 // 'udiv C, x' produces [0, C].
2476 Upper = *C + 1;
2477 }
2478 break;
2480 case Instruction::SRem:
2481 if (match(BO.getOperand(1), m_APInt(C))) {
2482 // 'srem x, C' produces (-|C|, |C|).
2483 Upper = C->abs();
2484 Lower = (-Upper) + 1;
2488 case Instruction::URem:
2489 if (match(BO.getOperand(1), m_APInt(C)))
2490 // 'urem x, C' produces [0, C).
2491 Upper = *C;
2499 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2500 Value *RHS) {
2501 Type *ITy = GetCompareTy(RHS); // The return type.
2503 Value *X;
2504 // Sign-bit checks can be optimized to true/false after unsigned
2505 // floating-point casts:
2506 // icmp slt (bitcast (uitofp X)), 0 --> false
2507 // icmp sgt (bitcast (uitofp X)), -1 --> true
2508 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
2509 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
2510 return ConstantInt::getFalse(ITy);
2511 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
2512 return ConstantInt::getTrue(ITy);
2515 const APInt *C;
2516 if (!match(RHS, m_APInt(C)))
2517 return nullptr;
2519 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
2520 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2521 if (RHS_CR.isEmptySet())
2522 return ConstantInt::getFalse(ITy);
2523 if (RHS_CR.isFullSet())
2524 return ConstantInt::getTrue(ITy);
2526 // Find the range of possible values for binary operators.
2527 unsigned Width = C->getBitWidth();
2528 APInt Lower = APInt(Width, 0);
2529 APInt Upper = APInt(Width, 0);
2530 if (auto *BO = dyn_cast<BinaryOperator>(LHS))
2531 setLimitsForBinOp(*BO, Lower, Upper);
2533 ConstantRange LHS_CR =
2534 Lower != Upper ? ConstantRange(Lower, Upper) : ConstantRange(Width, true);
2536 if (auto *I = dyn_cast<Instruction>(LHS))
2537 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
2538 LHS_CR = LHS_CR.intersectWith(getConstantRangeFromMetadata(*Ranges));
2540 if (!LHS_CR.isFullSet()) {
2541 if (RHS_CR.contains(LHS_CR))
2542 return ConstantInt::getTrue(ITy);
2543 if (RHS_CR.inverse().contains(LHS_CR))
2544 return ConstantInt::getFalse(ITy);
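// Worked example (illustrative): "%a = and i8 %x, 7" gets range [0, 8) from
// setLimitsForBinOp, so "icmp ult i8 %a, 16" (RHS_CR = [0, 16)) contains it
// and folds to true.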
2550 /// TODO: A large part of this logic is duplicated in InstCombine's
2551 /// foldICmpBinOp(). We should be able to share that and avoid the code
2552 /// duplication.
2553 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2554 Value *RHS, const SimplifyQuery &Q,
2555 unsigned MaxRecurse) {
2556 Type *ITy = GetCompareTy(LHS); // The return type.
2558 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2559 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2560 if (MaxRecurse && (LBO || RBO)) {
2561 // Analyze the case when either LHS or RHS is an add instruction.
2562 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2563 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
2564 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
2565 if (LBO && LBO->getOpcode() == Instruction::Add) {
2566 A = LBO->getOperand(0);
2567 B = LBO->getOperand(1);
2568 NoLHSWrapProblem =
2569 ICmpInst::isEquality(Pred) ||
2570 (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) ||
2571 (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap());
2573 if (RBO && RBO->getOpcode() == Instruction::Add) {
2574 C = RBO->getOperand(0);
2575 D = RBO->getOperand(1);
2576 NoRHSWrapProblem =
2577 ICmpInst::isEquality(Pred) ||
2578 (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) ||
2579 (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap());
2582 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
2583 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
2584 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
2585 Constant::getNullValue(RHS->getType()), Q,
2586 MaxRecurse - 1))
2587 return V;
2589 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2590 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
2592 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
2593 C == LHS ? D : C, Q, MaxRecurse - 1))
2594 return V;
2596 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
2597 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem &&
2599 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2602 // C + B == C + D -> B == D
2605 } else if (A == D) {
2606 // D + B == C + D -> B == C
2609 } else if (B == C) {
2610 // A + C == C + D -> A == D
2615 // A + D == C + D -> A == C
2619 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
2620 return V;
2626 // icmp pred (or X, Y), X
2627 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2628 if (Pred == ICmpInst::ICMP_ULT)
2629 return getFalse(ITy);
2630 if (Pred == ICmpInst::ICMP_UGE)
2631 return getTrue(ITy);
2633 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2634 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2635 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2636 if (RHSKnown.isNonNegative() && YKnown.isNegative())
2637 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2638 if (RHSKnown.isNegative() || YKnown.isNonNegative())
2639 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2640 }
2641 }
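// e.g. (illustrative): "icmp ult (or i32 %x, %y), %x" is always false,
// since or'ing in %y can only set bits and never decreases the unsigned
// value.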
2642 // icmp pred X, (or X, Y)
2643 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) {
2644 if (Pred == ICmpInst::ICMP_ULE)
2645 return getTrue(ITy);
2646 if (Pred == ICmpInst::ICMP_UGT)
2647 return getFalse(ITy);
2649 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) {
2650 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2651 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2652 if (LHSKnown.isNonNegative() && YKnown.isNegative())
2653 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy);
2654 if (LHSKnown.isNegative() || YKnown.isNonNegative())
2655 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy);
2660 // icmp pred (and X, Y), X
2661 if (LBO && match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
2662 if (Pred == ICmpInst::ICMP_UGT)
2663 return getFalse(ITy);
2664 if (Pred == ICmpInst::ICMP_ULE)
2665 return getTrue(ITy);
2667 // icmp pred X, (and X, Y)
2668 if (RBO && match(RBO, m_c_And(m_Value(), m_Specific(LHS)))) {
2669 if (Pred == ICmpInst::ICMP_UGE)
2670 return getTrue(ITy);
2671 if (Pred == ICmpInst::ICMP_ULT)
2672 return getFalse(ITy);
2673 }
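// e.g. (illustrative): "icmp uge i32 %x, (and i32 %x, %y)" is always true,
// since masking with %y can only clear bits of %x.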
2675 // 0 - (zext X) pred C
2676 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
2677 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2678 if (RHSC->getValue().isStrictlyPositive()) {
2679 if (Pred == ICmpInst::ICMP_SLT)
2680 return ConstantInt::getTrue(RHSC->getContext());
2681 if (Pred == ICmpInst::ICMP_SGE)
2682 return ConstantInt::getFalse(RHSC->getContext());
2683 if (Pred == ICmpInst::ICMP_EQ)
2684 return ConstantInt::getFalse(RHSC->getContext());
2685 if (Pred == ICmpInst::ICMP_NE)
2686 return ConstantInt::getTrue(RHSC->getContext());
2688 if (RHSC->getValue().isNonNegative()) {
2689 if (Pred == ICmpInst::ICMP_SLE)
2690 return ConstantInt::getTrue(RHSC->getContext());
2691 if (Pred == ICmpInst::ICMP_SGT)
2692 return ConstantInt::getFalse(RHSC->getContext());
2697 // icmp pred (urem X, Y), Y
2698 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2702 case ICmpInst::ICMP_SGT:
2703 case ICmpInst::ICMP_SGE: {
2704 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2705 if (!Known.isNonNegative())
2709 case ICmpInst::ICMP_EQ:
2710 case ICmpInst::ICMP_UGT:
2711 case ICmpInst::ICMP_UGE:
2712 return getFalse(ITy);
2713 case ICmpInst::ICMP_SLT:
2714 case ICmpInst::ICMP_SLE: {
2715 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2716 if (!Known.isNonNegative())
2720 case ICmpInst::ICMP_NE:
2721 case ICmpInst::ICMP_ULT:
2722 case ICmpInst::ICMP_ULE:
2723 return getTrue(ITy);
2724 }
2725 }
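// e.g. (illustrative): "icmp ult (urem i32 %x, %y), %y" folds to true: an
// unsigned remainder is always smaller than its divisor (a zero divisor is
// undefined behavior anyway).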
2727 // icmp pred X, (urem Y, X)
2728 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) {
2732 case ICmpInst::ICMP_SGT:
2733 case ICmpInst::ICMP_SGE: {
2734 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2735 if (!Known.isNonNegative())
2739 case ICmpInst::ICMP_NE:
2740 case ICmpInst::ICMP_UGT:
2741 case ICmpInst::ICMP_UGE:
2742 return getTrue(ITy);
2743 case ICmpInst::ICMP_SLT:
2744 case ICmpInst::ICMP_SLE: {
2745 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2746 if (!Known.isNonNegative())
2750 case ICmpInst::ICMP_EQ:
2751 case ICmpInst::ICMP_ULT:
2752 case ICmpInst::ICMP_ULE:
2753 return getFalse(ITy);
2759 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2760 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) {
2761 // icmp pred (X op Y), X
2762 if (Pred == ICmpInst::ICMP_UGT)
2763 return getFalse(ITy);
2764 if (Pred == ICmpInst::ICMP_ULE)
2765 return getTrue(ITy);
2770 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) ||
2771 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) {
2772 // icmp pred X, (X op Y)
2773 if (Pred == ICmpInst::ICMP_ULT)
2774 return getFalse(ITy);
2775 if (Pred == ICmpInst::ICMP_UGE)
2776 return getTrue(ITy);
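// e.g. (illustrative): "icmp uge i32 %x, (lshr i32 %x, %n)" is always true;
// a logical right shift (or udiv) cannot increase an unsigned value.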
2779 // handle:
2780 // CI2 << X == CI
2781 // CI2 << X != CI
2783 // where CI2 is a power of 2 and CI isn't
2784 if (auto *CI = dyn_cast<ConstantInt>(RHS)) {
2785 const APInt *CI2Val, *CIVal = &CI->getValue();
2786 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) &&
2787 CI2Val->isPowerOf2()) {
2788 if (!CIVal->isPowerOf2()) {
2789 // CI2 << X can equal zero in some circumstances,
2790 // this simplification is unsafe if CI is zero.
2792 // We know it is safe if:
2793 // - The shift is nsw, we can't shift out the one bit.
2794 // - The shift is nuw, we can't shift out the one bit.
2795 // - CI2 is one
2796 // - CI isn't zero
2797 if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() ||
2798 CI2Val->isOneValue() || !CI->isZero()) {
2799 if (Pred == ICmpInst::ICMP_EQ)
2800 return ConstantInt::getFalse(RHS->getContext());
2801 if (Pred == ICmpInst::ICMP_NE)
2802 return ConstantInt::getTrue(RHS->getContext());
2805 if (CIVal->isSignMask() && CI2Val->isOneValue()) {
2806 if (Pred == ICmpInst::ICMP_UGT)
2807 return ConstantInt::getFalse(RHS->getContext());
2808 if (Pred == ICmpInst::ICMP_ULE)
2809 return ConstantInt::getTrue(RHS->getContext());
2814 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
2815 LBO->getOperand(1) == RBO->getOperand(1)) {
2816 switch (LBO->getOpcode()) {
2819 case Instruction::UDiv:
2820 case Instruction::LShr:
2821 if (ICmpInst::isSigned(Pred) || !LBO->isExact() || !RBO->isExact())
2823 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2824 RBO->getOperand(0), Q, MaxRecurse - 1))
2825 return V;
2826 break;
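// e.g. (illustrative): "icmp ult (lshr exact i32 %a, %n), (lshr exact i32
// %b, %n)" reduces to "icmp ult i32 %a, %b", because exact shifts drop no
// set bits.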
2827 case Instruction::SDiv:
2828 if (!ICmpInst::isEquality(Pred) || !LBO->isExact() || !RBO->isExact())
2830 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2831 RBO->getOperand(0), Q, MaxRecurse - 1))
2832 return V;
2833 break;
2834 case Instruction::AShr:
2835 if (!LBO->isExact() || !RBO->isExact())
2837 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2838 RBO->getOperand(0), Q, MaxRecurse - 1))
2839 return V;
2840 break;
2841 case Instruction::Shl: {
2842 bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap();
2843 bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap();
2846 if (!NSW && ICmpInst::isSigned(Pred))
2848 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2849 RBO->getOperand(0), Q, MaxRecurse - 1))
2850 return V;
2851 break;
2858 /// Simplify integer comparisons where at least one operand of the compare
2859 /// matches an integer min/max idiom.
2860 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
2861 Value *RHS, const SimplifyQuery &Q,
2862 unsigned MaxRecurse) {
2863 Type *ITy = GetCompareTy(LHS); // The return type.
2865 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
2866 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
2868 // Signed variants on "max(a,b)>=a -> true".
2869 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
2871 std::swap(A, B); // smax(A, B) pred A.
2872 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2873 // We analyze this as smax(A, B) pred A.
2875 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
2876 (A == LHS || B == LHS)) {
2878 std::swap(A, B); // A pred smax(A, B).
2879 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2880 // We analyze this as smax(A, B) swapped-pred A.
2881 P = CmpInst::getSwappedPredicate(Pred);
2882 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
2883 (A == RHS || B == RHS)) {
2885 std::swap(A, B); // smin(A, B) pred A.
2886 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2887 // We analyze this as smax(-A, -B) swapped-pred -A.
2888 // Note that we do not need to actually form -A or -B thanks to EqP.
2889 P = CmpInst::getSwappedPredicate(Pred);
2890 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
2891 (A == LHS || B == LHS)) {
2893 std::swap(A, B); // A pred smin(A, B).
2894 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2895 // We analyze this as smax(-A, -B) pred -A.
2896 // Note that we do not need to actually form -A or -B thanks to EqP.
2899 if (P != CmpInst::BAD_ICMP_PREDICATE) {
2900 // Cases correspond to "max(A, B) p A".
2904 case CmpInst::ICMP_EQ:
2905 case CmpInst::ICMP_SLE:
2906 // Equivalent to "A EqP B". This may be the same as the condition tested
2907 // in the max/min; if so, we can just return that.
2908 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
2909 return V;
2910 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
2911 return V;
2912 // Otherwise, see if "A EqP B" simplifies.
2913 if (MaxRecurse)
2914 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
2915 return V;
2916 break;
2917 case CmpInst::ICMP_NE:
2918 case CmpInst::ICMP_SGT: {
2919 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
2920 // Equivalent to "A InvEqP B". This may be the same as the condition
2921 // tested in the max/min; if so, we can just return that.
2922 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
2923 return V;
2924 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
2925 return V;
2926 // Otherwise, see if "A InvEqP B" simplifies.
2927 if (MaxRecurse)
2928 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
2929 return V;
2930 break;
2931 }
2932 case CmpInst::ICMP_SGE:
2934 return getTrue(ITy);
2935 case CmpInst::ICMP_SLT:
2937 return getFalse(ITy);
2938 }
2939 }
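// e.g. (illustrative): "icmp sge (smax i32 %a, %b), %a" folds to true and
// "icmp slt (smax i32 %a, %b), %a" folds to false.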
2941 // Unsigned variants on "max(a,b)>=a -> true".
2942 P = CmpInst::BAD_ICMP_PREDICATE;
2943 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
2945 std::swap(A, B); // umax(A, B) pred A.
2946 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
2947 // We analyze this as umax(A, B) pred A.
2949 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
2950 (A == LHS || B == LHS)) {
2952 std::swap(A, B); // A pred umax(A, B).
2953 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
2954 // We analyze this as umax(A, B) swapped-pred A.
2955 P = CmpInst::getSwappedPredicate(Pred);
2956 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
2957 (A == RHS || B == RHS)) {
2959 std::swap(A, B); // umin(A, B) pred A.
2960 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
2961 // We analyze this as umax(-A, -B) swapped-pred -A.
2962 // Note that we do not need to actually form -A or -B thanks to EqP.
2963 P = CmpInst::getSwappedPredicate(Pred);
2964 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
2965 (A == LHS || B == LHS)) {
2967 std::swap(A, B); // A pred umin(A, B).
2968 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
2969 // We analyze this as umax(-A, -B) pred -A.
2970 // Note that we do not need to actually form -A or -B thanks to EqP.
2973 if (P != CmpInst::BAD_ICMP_PREDICATE) {
2974 // Cases correspond to "max(A, B) p A".
2978 case CmpInst::ICMP_EQ:
2979 case CmpInst::ICMP_ULE:
2980 // Equivalent to "A EqP B". This may be the same as the condition tested
2981 // in the max/min; if so, we can just return that.
2982 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
2983 return V;
2984 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
2985 return V;
2986 // Otherwise, see if "A EqP B" simplifies.
2987 if (MaxRecurse)
2988 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
2989 return V;
2990 break;
2991 case CmpInst::ICMP_NE:
2992 case CmpInst::ICMP_UGT: {
2993 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
2994 // Equivalent to "A InvEqP B". This may be the same as the condition
2995 // tested in the max/min; if so, we can just return that.
2996 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
2997 return V;
2998 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
2999 return V;
3000 // Otherwise, see if "A InvEqP B" simplifies.
3001 if (MaxRecurse)
3002 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3003 return V;
3004 break;
3005 }
3006 case CmpInst::ICMP_UGE:
3008 return getTrue(ITy);
3009 case CmpInst::ICMP_ULT:
3011 return getFalse(ITy);
3015 // Variants on "max(x,y) >= min(x,z)".
3017 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3018 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3019 (A == C || A == D || B == C || B == D)) {
3020 // max(x, ?) pred min(x, ?).
3021 if (Pred == CmpInst::ICMP_SGE)
3023 return getTrue(ITy);
3024 if (Pred == CmpInst::ICMP_SLT)
3026 return getFalse(ITy);
3027 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3028 match(RHS, m_SMax(m_Value(C), m_Value(D))) &&
3029 (A == C || A == D || B == C || B == D)) {
3030 // min(x, ?) pred max(x, ?).
3031 if (Pred == CmpInst::ICMP_SLE)
3033 return getTrue(ITy);
3034 if (Pred == CmpInst::ICMP_SGT)
3036 return getFalse(ITy);
3037 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3038 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3039 (A == C || A == D || B == C || B == D)) {
3040 // max(x, ?) pred min(x, ?).
3041 if (Pred == CmpInst::ICMP_UGE)
3043 return getTrue(ITy);
3044 if (Pred == CmpInst::ICMP_ULT)
3046 return getFalse(ITy);
3047 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3048 match(RHS, m_UMax(m_Value(C), m_Value(D))) &&
3049 (A == C || A == D || B == C || B == D)) {
3050 // min(x, ?) pred max(x, ?).
3051 if (Pred == CmpInst::ICMP_ULE)
3053 return getTrue(ITy);
3054 if (Pred == CmpInst::ICMP_UGT)
3056 return getFalse(ITy);
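// e.g. (illustrative): "icmp ule (umin i32 %x, %y), (umax i32 %x, %z)"
// folds to true, since umin(x, y) <=u x <=u umax(x, z).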
3062 /// Given operands for an ICmpInst, see if we can fold the result.
3063 /// If not, this returns null.
3064 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3065 const SimplifyQuery &Q, unsigned MaxRecurse) {
3066 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3067 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3069 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3070 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3071 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3073 // If we have a constant, make sure it is on the RHS.
3074 std::swap(LHS, RHS);
3075 Pred = CmpInst::getSwappedPredicate(Pred);
3078 Type *ITy = GetCompareTy(LHS); // The return type.
3080 // icmp X, X -> true/false
3081 // icmp X, undef -> true/false because undef could be X.
3082 if (LHS == RHS || isa<UndefValue>(RHS))
3083 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3085 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3086 return V;
3088 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3089 return V;
3091 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS))
3092 return V;
3094 // If both operands have range metadata, use the metadata
3095 // to simplify the comparison.
3096 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3097 auto RHS_Instr = cast<Instruction>(RHS);
3098 auto LHS_Instr = cast<Instruction>(LHS);
3100 if (RHS_Instr->getMetadata(LLVMContext::MD_range) &&
3101 LHS_Instr->getMetadata(LLVMContext::MD_range)) {
3102 auto RHS_CR = getConstantRangeFromMetadata(
3103 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3104 auto LHS_CR = getConstantRangeFromMetadata(
3105 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3107 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR);
3108 if (Satisfied_CR.contains(LHS_CR))
3109 return ConstantInt::getTrue(RHS->getContext());
3111 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion(
3112 CmpInst::getInversePredicate(Pred), RHS_CR);
3113 if (InversedSatisfied_CR.contains(LHS_CR))
3114 return ConstantInt::getFalse(RHS->getContext());
3115 }
3116 }
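// Illustrative example: if %a carries !range [0, 10) and %b carries
// !range [10, 20), then "icmp ult i32 %a, %b" folds to true, since every
// value in the first range is <u every value in the second.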
3118 // Compare of cast, for example (zext X) != 0 -> X != 0
3119 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3120 Instruction *LI = cast<CastInst>(LHS);
3121 Value *SrcOp = LI->getOperand(0);
3122 Type *SrcTy = SrcOp->getType();
3123 Type *DstTy = LI->getType();
3125 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3126 // if the integer type is the same size as the pointer type.
3127 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3128 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3129 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3130 // Transfer the cast to the constant.
3131 if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3132 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3133 Q, MaxRecurse-1))
3134 return V;
3135 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3136 if (RI->getOperand(0)->getType() == SrcTy)
3137 // Compare without the cast.
3138 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3139 Q, MaxRecurse-1))
3140 return V;
3141 }
3142 }
3144 if (isa<ZExtInst>(LHS)) {
3145 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3147 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3148 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3149 // Compare X and Y. Note that signed predicates become unsigned.
3150 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3151 SrcOp, RI->getOperand(0), Q,
3152 MaxRecurse-1))
3153 return V;
3154 }
3155 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3156 // too. If not, then try to deduce the result of the comparison.
3157 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3158 // Compute the constant that would happen if we truncated to SrcTy then
3159 // reextended to DstTy.
3160 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3161 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3163 // If the re-extended constant didn't change then this is effectively
3164 // also a case of comparing two zero-extended values.
3165 if (RExt == CI && MaxRecurse)
3166 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3167 SrcOp, Trunc, Q, MaxRecurse-1))
3168 return V;
3170 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3171 // there. Use this to work out the result of the comparison.
3174 default: llvm_unreachable("Unknown ICmp predicate!");
3176 case ICmpInst::ICMP_EQ:
3177 case ICmpInst::ICMP_UGT:
3178 case ICmpInst::ICMP_UGE:
3179 return ConstantInt::getFalse(CI->getContext());
3181 case ICmpInst::ICMP_NE:
3182 case ICmpInst::ICMP_ULT:
3183 case ICmpInst::ICMP_ULE:
3184 return ConstantInt::getTrue(CI->getContext());
3186 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3187 // is non-negative then LHS <s RHS.
3188 case ICmpInst::ICMP_SGT:
3189 case ICmpInst::ICMP_SGE:
3190 return CI->getValue().isNegative() ?
3191 ConstantInt::getTrue(CI->getContext()) :
3192 ConstantInt::getFalse(CI->getContext());
3194 case ICmpInst::ICMP_SLT:
3195 case ICmpInst::ICMP_SLE:
3196 return CI->getValue().isNegative() ?
3197 ConstantInt::getFalse(CI->getContext()) :
3198 ConstantInt::getTrue(CI->getContext());
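// e.g. (illustrative): in "icmp ult (zext i8 %x to i32), 256" the constant
// does not survive the trunc/zext round-trip, so the logic above applies:
// a zero-extended i8 is at most 255, and the compare folds to true.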
3204 if (isa<SExtInst>(LHS)) {
3205 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3207 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3208 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3209 // Compare X and Y. Note that the predicate does not change.
3210 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3211 Q, MaxRecurse-1))
3212 return V;
3213 }
3214 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3215 // too. If not, then try to deduce the result of the comparison.
3216 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3217 // Compute the constant that would happen if we truncated to SrcTy then
3218 // reextended to DstTy.
3219 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3220 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3222 // If the re-extended constant didn't change then this is effectively
3223 // also a case of comparing two sign-extended values.
3224 if (RExt == CI && MaxRecurse)
3225 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3226 return V;
3228 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3229 // bits there. Use this to work out the result of the comparison.
3232 default: llvm_unreachable("Unknown ICmp predicate!");
3233 case ICmpInst::ICMP_EQ:
3234 return ConstantInt::getFalse(CI->getContext());
3235 case ICmpInst::ICMP_NE:
3236 return ConstantInt::getTrue(CI->getContext());
3238 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
3239 // LHS >s RHS.
3240 case ICmpInst::ICMP_SGT:
3241 case ICmpInst::ICMP_SGE:
3242 return CI->getValue().isNegative() ?
3243 ConstantInt::getTrue(CI->getContext()) :
3244 ConstantInt::getFalse(CI->getContext());
3245 case ICmpInst::ICMP_SLT:
3246 case ICmpInst::ICMP_SLE:
3247 return CI->getValue().isNegative() ?
3248 ConstantInt::getFalse(CI->getContext()) :
3249 ConstantInt::getTrue(CI->getContext());
3251 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
3252 // LHS >u RHS.
3253 case ICmpInst::ICMP_UGT:
3254 case ICmpInst::ICMP_UGE:
3255 // Comparison is true iff the LHS <s 0.
3257 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3258 Constant::getNullValue(SrcTy),
3259 Q, MaxRecurse-1))
3260 return V;
3261 break;
3262 case ICmpInst::ICMP_ULT:
3263 case ICmpInst::ICMP_ULE:
3264 // Comparison is true iff the LHS >=s 0.
3266 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3267 Constant::getNullValue(SrcTy),
3268 Q, MaxRecurse-1))
3269 return V;
3270 break;
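// e.g. (illustrative): "icmp slt (sext i8 %x to i32), 200" folds to true;
// a sign-extended i8 always lies in [-128, 127].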
3277 // icmp eq|ne X, Y -> false|true if X != Y
3278 if (ICmpInst::isEquality(Pred) &&
3279 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) {
3280 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3283 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3284 return V;
3286 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3287 return V;
3289 // Simplify comparisons of related pointers using a powerful, recursive
3290 // GEP-walk when we have target data available.
3291 if (LHS->getType()->isPointerTy())
3292 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, LHS,
3293 RHS))
3294 return C;
3295 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3296 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3297 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3298 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3299 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3300 Q.DL.getTypeSizeInBits(CRHS->getType()))
3301 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
3302 CLHS->getPointerOperand(),
3303 CRHS->getPointerOperand()))
3304 return C;
3306 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3307 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3308 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3309 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3310 (ICmpInst::isEquality(Pred) ||
3311 (GLHS->isInBounds() && GRHS->isInBounds() &&
3312 Pred == ICmpInst::getSignedPredicate(Pred)))) {
3313 // The bases are equal and the indices are constant. Build a constant
3314 // expression GEP with the same indices and a null base pointer to see
3315 // what constant folding can make out of it.
3316 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
3317 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
3318 Constant *NewLHS = ConstantExpr::getGetElementPtr(
3319 GLHS->getSourceElementType(), Null, IndicesLHS);
3321 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
3322 Constant *NewRHS = ConstantExpr::getGetElementPtr(
3323 GLHS->getSourceElementType(), Null, IndicesRHS);
3324 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
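// Illustrative example: for two GEPs off the same %base with constant
// indices 1 and 2, "icmp eq" is rewritten as a compare of the same GEPs
// built on a null base, which constant-folds to false.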
3329 // If the comparison is with the result of a select instruction, check whether
3330 // comparing with either branch of the select always yields the same value.
3331 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3332 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3333 return V;
3335 // If the comparison is with the result of a phi instruction, check whether
3336 // doing the compare with each incoming phi value yields a common result.
3337 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3338 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3339 return V;
3341 return nullptr;
3344 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3345 const SimplifyQuery &Q) {
3346 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
3349 /// Given operands for an FCmpInst, see if we can fold the result.
3350 /// If not, this returns null.
3351 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3352 FastMathFlags FMF, const SimplifyQuery &Q,
3353 unsigned MaxRecurse) {
3354 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3355 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3357 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3358 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3359 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3361 // If we have a constant, make sure it is on the RHS.
3362 std::swap(LHS, RHS);
3363 Pred = CmpInst::getSwappedPredicate(Pred);
3366 // Fold trivial predicates.
3367 Type *RetTy = GetCompareTy(LHS);
3368 if (Pred == FCmpInst::FCMP_FALSE)
3369 return getFalse(RetTy);
3370 if (Pred == FCmpInst::FCMP_TRUE)
3371 return getTrue(RetTy);
3373 // UNO/ORD predicates can be trivially folded if NaNs are ignored.
3374 if (FMF.noNaNs()) {
3375 if (Pred == FCmpInst::FCMP_UNO)
3376 return getFalse(RetTy);
3377 if (Pred == FCmpInst::FCMP_ORD)
3378 return getTrue(RetTy);
3379 }
3381 // NaN is unordered; NaN is not ordered.
3382 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
3383 "Comparison must be either ordered or unordered");
3384 if (match(RHS, m_NaN()))
3385 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3387 // fcmp pred x, undef and fcmp pred undef, x
3388 // fold to true if unordered, false if ordered
3389 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
3390 // Choosing NaN for the undef will always make unordered comparison succeed
3391 // and ordered comparison fail.
3392 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3393 }
3395 // fcmp x,x -> true/false. Not all compares are foldable.
3396 if (LHS == RHS) {
3397 if (CmpInst::isTrueWhenEqual(Pred))
3398 return getTrue(RetTy);
3399 if (CmpInst::isFalseWhenEqual(Pred))
3400 return getFalse(RetTy);
3401 }
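// For example, "fcmp ueq %x, %x" is always true: the operands are equal, or
// both are NaN and ueq accepts unordered inputs. Likewise "fcmp one %x, %x"
// is always false. "fcmp oeq %x, %x" is *not* foldable this way, since it is
// false when %x is NaN.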
3403 // Handle fcmp with constant RHS.
3404 const APFloat *C;
3405 if (match(RHS, m_APFloat(C))) {
3406 // Check whether the constant is an infinity.
3407 if (C->isInfinity()) {
3408 if (C->isNegative()) {
3409 switch (Pred) {
3410 case FCmpInst::FCMP_OLT:
3411 // No value is ordered and less than negative infinity.
3412 return getFalse(RetTy);
3413 case FCmpInst::FCMP_UGE:
3414 // Every value is either unordered with -inf or at least -inf.
3415 return getTrue(RetTy);
3416 default:
3417 break;
3418 }
3419 } else {
3420 switch (Pred) {
3421 case FCmpInst::FCMP_OGT:
3422 // No value is ordered and greater than infinity.
3423 return getFalse(RetTy);
3424 case FCmpInst::FCMP_ULE:
3425 // Every value is either unordered with +inf or at most +inf.
3426 return getTrue(RetTy);
3427 default:
3428 break;
3429 }
3430 }
3431 }
3432 if (C->isZero()) {
3433 switch (Pred) {
3434 case FCmpInst::FCMP_UGE:
3435 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3436 return getTrue(RetTy);
3437 break;
3438 case FCmpInst::FCMP_OLT:
3440 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3441 return getFalse(RetTy);
3442 break;
3443 default:
3444 break;
3445 }
3446 } else if (C->isNegative()) {
3447 assert(!C->isNaN() && "Unexpected NaN constant!");
3448 // TODO: We can catch more cases by using a range check rather than
3449 // relying on CannotBeOrderedLessThanZero.
3450 switch (Pred) {
3451 case FCmpInst::FCMP_UGE:
3452 case FCmpInst::FCMP_UGT:
3453 case FCmpInst::FCMP_UNE:
3454 // (X >= 0) implies (X > C) when (C < 0)
3455 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3456 return getTrue(RetTy);
3457 break;
3458 case FCmpInst::FCMP_OEQ:
3459 case FCmpInst::FCMP_OLE:
3460 case FCmpInst::FCMP_OLT:
3461 // (X >= 0) implies !(X < C) when (C < 0)
3462 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3463 return getFalse(RetTy);
3464 break;
3465 default:
3466 break;
3467 }
3468 }
3469 }
3471 // If the comparison is with the result of a select instruction, check whether
3472 // comparing with either branch of the select always yields the same value.
3473 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3474 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3475 return V;
3477 // If the comparison is with the result of a phi instruction, check whether
3478 // doing the compare with each incoming phi value yields a common result.
3479 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3480 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3481 return V;
3483 return nullptr;
3484 }
3486 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3487 FastMathFlags FMF, const SimplifyQuery &Q) {
3488 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
3489 }
3491 /// See if V simplifies when its operand Op is replaced with RepOp.
3492 static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3493 const SimplifyQuery &Q,
3494 unsigned MaxRecurse) {
3495 // Trivial replacement.
3496 if (V == Op)
3497 return RepOp;
3499 // We cannot replace a constant, and shouldn't even try.
3500 if (isa<Constant>(Op))
3501 return nullptr;
3503 auto *I = dyn_cast<Instruction>(V);
3504 if (!I)
3505 return nullptr;
3507 // If this is a binary operator, try to simplify it with the replaced op.
3508 if (auto *B = dyn_cast<BinaryOperator>(I)) {
3510 // %cmp = icmp eq i32 %x, 2147483647
3511 // %add = add nsw i32 %x, 1
3512 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
3514 // We can't replace %sel with %add unless we strip away the flags.
3515 if (isa<OverflowingBinaryOperator>(B))
3516 if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
3517 return nullptr;
3518 if (isa<PossiblyExactOperator>(B))
3519 if (B->isExact())
3520 return nullptr;
3522 if (MaxRecurse) {
3523 if (B->getOperand(0) == Op)
3524 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q,
3525 MaxRecurse - 1);
3526 if (B->getOperand(1) == Op)
3527 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q,
3528 MaxRecurse - 1);
3529 }
3530 }
3532 // Same for CmpInsts.
3533 if (CmpInst *C = dyn_cast<CmpInst>(I)) {
3534 if (MaxRecurse) {
3535 if (C->getOperand(0) == Op)
3536 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
3537 MaxRecurse - 1);
3538 if (C->getOperand(1) == Op)
3539 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q,
3540 MaxRecurse - 1);
3541 }
3542 }
3544 // Same for GEPs.
3545 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3546 if (MaxRecurse) {
3547 SmallVector<Value *, 8> NewOps(GEP->getNumOperands());
3548 transform(GEP->operands(), NewOps.begin(),
3549 [&](Value *V) { return V == Op ? RepOp : V; });
3550 return SimplifyGEPInst(GEP->getSourceElementType(), NewOps, Q,
3551 MaxRecurse - 1);
3552 }
3553 }
3555 // TODO: We could hand off more cases to instsimplify here.
3557 // If all operands are constant after substituting Op for RepOp then we can
3558 // constant fold the instruction.
3559 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
3560 // Build a list of all constant operands.
3561 SmallVector<Constant *, 8> ConstOps;
3562 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3563 if (I->getOperand(i) == Op)
3564 ConstOps.push_back(CRepOp);
3565 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
3566 ConstOps.push_back(COp);
3567 else
3568 break;
3569 }
3571 // All operands were constants, fold it.
3572 if (ConstOps.size() == I->getNumOperands()) {
3573 if (CmpInst *C = dyn_cast<CmpInst>(I))
3574 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
3575 ConstOps[1], Q.DL, Q.TLI);
3577 if (LoadInst *LI = dyn_cast<LoadInst>(I))
3578 if (!LI->isVolatile())
3579 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
3581 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
3582 }
3583 }
3585 return nullptr;
3586 }
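// An illustrative use of SimplifyWithOpReplaced: substituting 0 for %x in
// "or i32 %x, %y" simplifies to %y, which is what lets the select logic
// further below fold "select (icmp eq i32 %x, 0), i32 %y, (or i32 %x, %y)"
// down to the or itself.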
3588 /// Try to simplify a select instruction when its condition operand is an
3589 /// integer comparison where one operand of the compare is a constant.
3590 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
3591 const APInt *Y, bool TrueWhenUnset) {
3592 const APInt *C;
3594 // (X & Y) == 0 ? X & ~Y : X --> X
3595 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
3596 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
3597 *Y == ~*C)
3598 return TrueWhenUnset ? FalseVal : TrueVal;
3600 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
3601 // (X & Y) != 0 ? X : X & ~Y --> X
3602 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
3603 *Y == ~*C)
3604 return TrueWhenUnset ? FalseVal : TrueVal;
3606 if (Y->isPowerOf2()) {
3607 // (X & Y) == 0 ? X | Y : X --> X | Y
3608 // (X & Y) != 0 ? X | Y : X --> X
3609 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
3610 *Y == *C)
3611 return TrueWhenUnset ? TrueVal : FalseVal;
3613 // (X & Y) == 0 ? X : X | Y --> X
3614 // (X & Y) != 0 ? X : X | Y --> X | Y
3615 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
3616 *Y == *C)
3617 return TrueWhenUnset ? TrueVal : FalseVal;
3618 }
3620 return nullptr;
3621 }
3623 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
3624 /// eq/ne.
3625 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
3626 ICmpInst::Predicate Pred,
3627 Value *TrueVal, Value *FalseVal) {
3628 Value *X;
3629 APInt Mask;
3630 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
3631 return nullptr;
3633 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
3634 Pred == ICmpInst::ICMP_EQ);
3635 }
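// For example, "icmp sgt %x, -1" is effectively a test that the sign bit of
// %x is clear, so decomposeBitTestICmp canonicalizes it to an eq/ne-style
// mask test and the bit-test select folds above apply unchanged.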
3637 /// Try to simplify a select instruction when its condition operand is an
3638 /// integer comparison.
3639 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
3640 Value *FalseVal, const SimplifyQuery &Q,
3641 unsigned MaxRecurse) {
3642 ICmpInst::Predicate Pred;
3643 Value *CmpLHS, *CmpRHS;
3644 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
3645 return nullptr;
3647 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) {
3648 Value *X;
3649 const APInt *Y;
3650 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
3651 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
3652 Pred == ICmpInst::ICMP_EQ))
3653 return V;
3654 }
3656 // Check for other compares that behave like bit test.
3657 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred,
3658 TrueVal, FalseVal))
3659 return V;
3661 // If we have an equality comparison, then we know the value in one of the
3662 // arms of the select. See if substituting this value into the arm and
3663 // simplifying the result yields the same value as the other arm.
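// For instance, in "select (icmp eq i32 %x, 0), i32 0, i32 %x" the false arm
// %x becomes 0 once %x is replaced by 0, which matches the true arm, so the
// whole select is just %x.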
3664 if (Pred == ICmpInst::ICMP_EQ) {
3665 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3666 TrueVal ||
3667 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3668 TrueVal)
3669 return FalseVal;
3670 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3671 FalseVal ||
3672 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3673 FalseVal)
3674 return FalseVal;
3675 } else if (Pred == ICmpInst::ICMP_NE) {
3676 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3677 FalseVal ||
3678 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3679 FalseVal)
3680 return TrueVal;
3681 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3682 TrueVal ||
3683 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3684 TrueVal)
3685 return TrueVal;
3686 }
3688 return nullptr;
3689 }
3691 /// Given operands for a SelectInst, see if we can fold the result.
3692 /// If not, this returns null.
3693 static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
3694 const SimplifyQuery &Q, unsigned MaxRecurse) {
3695 if (auto *CondC = dyn_cast<Constant>(Cond)) {
3696 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
3697 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
3698 return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);
3700 // select undef, X, Y -> X or Y
3701 if (isa<UndefValue>(CondC))
3702 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
3704 // TODO: Vector constants with undef elements don't simplify.
3706 // select true, X, Y -> X
3707 if (CondC->isAllOnesValue())
3708 return TrueVal;
3709 // select false, X, Y -> Y
3710 if (CondC->isNullValue())
3711 return FalseVal;
3712 }
3714 // select ?, X, X -> X
3715 if (TrueVal == FalseVal)
3716 return TrueVal;
3718 if (isa<UndefValue>(TrueVal)) // select ?, undef, X -> X
3719 return FalseVal;
3720 if (isa<UndefValue>(FalseVal)) // select ?, X, undef -> X
3721 return TrueVal;
3723 if (Value *V =
3724 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
3725 return V;
3727 return nullptr;
3728 }
3730 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
3731 const SimplifyQuery &Q) {
3732 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
3733 }
3735 /// Given operands for a GetElementPtrInst, see if we can fold the result.
3736 /// If not, this returns null.
3737 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3738 const SimplifyQuery &Q, unsigned) {
3739 // The type of the GEP pointer operand.
3740 unsigned AS =
3741 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
3743 // getelementptr P -> P.
3744 if (Ops.size() == 1)
3745 return Ops[0];
3747 // Compute the (pointer) type returned by the GEP instruction.
3748 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
3749 Type *GEPTy = PointerType::get(LastType, AS);
3750 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
3751 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3752 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType()))
3753 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3755 if (isa<UndefValue>(Ops[0]))
3756 return UndefValue::get(GEPTy);
3758 if (Ops.size() == 2) {
3759 // getelementptr P, 0 -> P.
3760 if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy)
3761 return Ops[0];
3763 Type *Ty = SrcTy;
3764 if (Ty->isSized()) {
3765 Value *P;
3766 uint64_t C;
3767 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
3768 // getelementptr P, N -> P if P points to a type of zero size.
3769 if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy)
3770 return Ops[0];
3772 // The following transforms are only safe if the ptrtoint cast
3773 // doesn't truncate the pointers.
3774 if (Ops[1]->getType()->getScalarSizeInBits() ==
3775 Q.DL.getIndexSizeInBits(AS)) {
3776 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
3777 if (match(P, m_Zero()))
3778 return Constant::getNullValue(GEPTy);
3779 Value *Temp;
3780 if (match(P, m_PtrToInt(m_Value(Temp))))
3781 if (Temp->getType() == GEPTy)
3782 return Temp;
3783 return nullptr;
3784 };
3786 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
3787 if (TyAllocSize == 1 &&
3788 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
3789 if (Value *R = PtrToIntOrZero(P))
3790 return R;
3792 // getelementptr V, (ashr (sub P, V), C) -> Q
3793 // where P is (ptrtoint Q) and the element type has size 1 << C.
3794 if (match(Ops[1],
3795 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3796 m_ConstantInt(C))) &&
3797 TyAllocSize == 1ULL << C)
3798 if (Value *R = PtrToIntOrZero(P))
3799 return R;
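// A worked example of the sdiv fold below, assuming i32 elements (size 4):
//   %d = sdiv i64 (sub i64 (ptrtoint i32* %q), (ptrtoint i32* %p)), 4
//   getelementptr i32, i32* %p, i64 %d
// advances %p by exactly %q - %p bytes, so the GEP is simply %q.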
3801 // getelementptr V, (sdiv (sub P, V), C) -> Q
3802 // where P is (ptrtoint Q) and the element type has size C.
3803 if (match(Ops[1],
3804 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3805 m_SpecificInt(TyAllocSize))))
3806 if (Value *R = PtrToIntOrZero(P))
3807 return R;
3808 }
3809 }
3810 }
3812 if (Q.DL.getTypeAllocSize(LastType) == 1 &&
3813 all_of(Ops.slice(1).drop_back(1),
3814 [](Value *Idx) { return match(Idx, m_Zero()); })) {
3815 unsigned IdxWidth =
3816 Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
3817 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) {
3818 APInt BasePtrOffset(IdxWidth, 0);
3819 Value *StrippedBasePtr =
3820 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
3821 BasePtrOffset);
3823 // gep (gep V, C), (sub 0, V) -> C
3824 if (match(Ops.back(),
3825 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) {
3826 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
3827 return ConstantExpr::getIntToPtr(CI, GEPTy);
3828 }
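// The "xor V, -1" fold below relies on two's complement: xor(V, -1) == -V-1,
// so the final address is (V + BasePtrOffset) + (-V - 1) == BasePtrOffset - 1
// regardless of V (the element size is 1, as checked above).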
3829 // gep (gep V, C), (xor V, -1) -> C-1
3830 if (match(Ops.back(),
3831 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) {
3832 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
3833 return ConstantExpr::getIntToPtr(CI, GEPTy);
3834 }
3835 }
3836 }
3838 // Check to see if this is constant foldable.
3839 if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); }))
3840 return nullptr;
3842 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
3843 Ops.slice(1));
3844 if (auto *CEFolded = ConstantFoldConstant(CE, Q.DL))
3845 return CEFolded;
3846 return CE;
3847 }
3849 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3850 const SimplifyQuery &Q) {
3851 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit);
3852 }
3854 /// Given operands for an InsertValueInst, see if we can fold the result.
3855 /// If not, this returns null.
3856 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
3857 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q,
3858 unsigned) {
3859 if (Constant *CAgg = dyn_cast<Constant>(Agg))
3860 if (Constant *CVal = dyn_cast<Constant>(Val))
3861 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
3863 // insertvalue x, undef, n -> x
3864 if (match(Val, m_Undef()))
3865 return Agg;
3867 // insertvalue x, (extractvalue y, n), n
3868 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
3869 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
3870 EV->getIndices() == Idxs) {
3871 // insertvalue undef, (extractvalue y, n), n -> y
3872 if (match(Agg, m_Undef()))
3873 return EV->getAggregateOperand();
3875 // insertvalue y, (extractvalue y, n), n -> y
3876 if (Agg == EV->getAggregateOperand())
3877 return Agg;
3878 }
3880 return nullptr;
3881 }
3883 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
3884 ArrayRef<unsigned> Idxs,
3885 const SimplifyQuery &Q) {
3886 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
3887 }
3889 Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
3890 const SimplifyQuery &Q) {
3891 // Try to constant fold.
3892 auto *VecC = dyn_cast<Constant>(Vec);
3893 auto *ValC = dyn_cast<Constant>(Val);
3894 auto *IdxC = dyn_cast<Constant>(Idx);
3895 if (VecC && ValC && IdxC)
3896 return ConstantFoldInsertElementInstruction(VecC, ValC, IdxC);
3898 // Fold into undef if index is out of bounds.
3899 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
3900 uint64_t NumElements = cast<VectorType>(Vec->getType())->getNumElements();
3901 if (CI->uge(NumElements))
3902 return UndefValue::get(Vec->getType());
3903 }
3905 // If index is undef, it might be out of bounds (see above case)
3906 if (isa<UndefValue>(Idx))
3907 return UndefValue::get(Vec->getType());
3909 return nullptr;
3910 }
3912 /// Given operands for an ExtractValueInst, see if we can fold the result.
3913 /// If not, this returns null.
3914 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3915 const SimplifyQuery &, unsigned) {
3916 if (auto *CAgg = dyn_cast<Constant>(Agg))
3917 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
3919 // extractvalue x, (insertvalue y, elt, n), n -> elt
3920 unsigned NumIdxs = Idxs.size();
3921 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
3922 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
3923 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
3924 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
3925 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
3926 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
3927 Idxs.slice(0, NumCommonIdxs)) {
3928 if (NumIdxs == NumInsertValueIdxs)
3929 return IVI->getInsertedValueOperand();
3930 break;
3931 }
3932 }
3934 return nullptr;
3935 }
3937 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3938 const SimplifyQuery &Q) {
3939 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
3940 }
3942 /// Given operands for an ExtractElementInst, see if we can fold the result.
3943 /// If not, this returns null.
3944 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &,
3945 unsigned) {
3946 if (auto *CVec = dyn_cast<Constant>(Vec)) {
3947 if (auto *CIdx = dyn_cast<Constant>(Idx))
3948 return ConstantFoldExtractElementInstruction(CVec, CIdx);
3950 // The index is not relevant if our vector is a splat.
3951 if (auto *Splat = CVec->getSplatValue())
3952 return Splat;
3954 if (isa<UndefValue>(Vec))
3955 return UndefValue::get(Vec->getType()->getVectorElementType());
3956 }
3958 // If extracting a specified index from the vector, see if we can recursively
3959 // find a previously computed scalar that was inserted into the vector.
3960 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
3961 if (IdxC->getValue().uge(Vec->getType()->getVectorNumElements()))
3962 // definitely out of bounds, thus undefined result
3963 return UndefValue::get(Vec->getType()->getVectorElementType());
3964 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
3965 return Elt;
3966 }
3968 // An undef extract index can be arbitrarily chosen to be an out-of-range
3969 // index value, which would result in the instruction being undef.
3970 if (isa<UndefValue>(Idx))
3971 return UndefValue::get(Vec->getType()->getVectorElementType());
3973 return nullptr;
3974 }
3976 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
3977 const SimplifyQuery &Q) {
3978 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
3979 }
3981 /// See if we can fold the given phi. If not, returns null.
3982 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) {
3983 // If all of the PHI's incoming values are the same then replace the PHI node
3984 // with the common value.
3985 Value *CommonValue = nullptr;
3986 bool HasUndefInput = false;
3987 for (Value *Incoming : PN->incoming_values()) {
3988 // If the incoming value is the phi node itself, it can safely be skipped.
3989 if (Incoming == PN) continue;
3990 if (isa<UndefValue>(Incoming)) {
3991 // Remember that we saw an undef value, but otherwise ignore them.
3992 HasUndefInput = true;
3993 continue;
3994 }
3995 if (CommonValue && Incoming != CommonValue)
3996 return nullptr; // Not the same, bail out.
3997 CommonValue = Incoming;
3998 }
4000 // If CommonValue is null then all of the incoming values were either undef or
4001 // equal to the phi node itself.
4002 if (!CommonValue)
4003 return UndefValue::get(PN->getType());
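// For example, "phi i32 [ %x, %bb0 ], [ undef, %bb1 ], [ %x, %bb2 ]" has
// CommonValue == %x and folds to %x, provided %x dominates the phi's block;
// the check below enforces that.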
4005 // If we have a PHI node like phi(X, undef, X), where X is defined by some
4006 // instruction, we cannot return X as the result of the PHI node unless it
4007 // dominates the PHI block.
4008 if (HasUndefInput)
4009 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
4011 return CommonValue;
4012 }
4014 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
4015 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) {
4016 if (auto *C = dyn_cast<Constant>(Op))
4017 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
4019 if (auto *CI = dyn_cast<CastInst>(Op)) {
4020 auto *Src = CI->getOperand(0);
4021 Type *SrcTy = Src->getType();
4022 Type *MidTy = CI->getType();
4023 Type *DstTy = Ty;
4024 if (Src->getType() == Ty) {
4025 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
4026 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
4027 Type *SrcIntPtrTy =
4028 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
4029 Type *MidIntPtrTy =
4030 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
4031 Type *DstIntPtrTy =
4032 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
4033 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
4034 SrcIntPtrTy, MidIntPtrTy,
4035 DstIntPtrTy) == Instruction::BitCast)
4036 return Src;
4037 }
4038 }
4040 // bitcast x -> x
4041 if (CastOpc == Instruction::BitCast)
4042 if (Op->getType() == Ty)
4043 return Op;
4045 return nullptr;
4046 }
4048 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
4049 const SimplifyQuery &Q) {
4050 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
4051 }
4053 /// For the given destination element of a shuffle, peek through shuffles to
4054 /// match a root vector source operand that contains that element in the same
4055 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
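/// For example (illustrative IR), two back-to-back lane swaps cancel:
///   %s1 = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
///   %s2 = shufflevector <2 x i32> %s1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
/// Each element of %s2 traces back to its own lane of %v, so %s2 folds to %v.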
4056 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
4057 int MaskVal, Value *RootVec,
4058 unsigned MaxRecurse) {
4059 if (!MaxRecurse--)
4060 return nullptr;
4062 // Bail out if any mask value is undefined. That kind of shuffle may be
4063 // simplified further based on demanded bits or other folds.
4064 if (MaskVal == -1)
4065 return nullptr;
4067 // The mask value chooses which source operand we need to look at next.
4068 int InVecNumElts = Op0->getType()->getVectorNumElements();
4069 int RootElt = MaskVal;
4070 Value *SourceOp = Op0;
4071 if (MaskVal >= InVecNumElts) {
4072 RootElt = MaskVal - InVecNumElts;
4073 SourceOp = Op1;
4074 }
4076 // If the source operand is a shuffle itself, look through it to find the
4077 // matching root vector.
4078 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
4079 return foldIdentityShuffles(
4080 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
4081 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
4082 }
4084 // TODO: Look through bitcasts? What if the bitcast changes the vector element
4085 // size?
4087 // The source operand is not a shuffle. Initialize the root vector value for
4088 // this shuffle if that has not been done yet.
4089 if (!RootVec)
4090 RootVec = SourceOp;
4092 // Give up as soon as a source operand does not match the existing root value.
4093 if (RootVec != SourceOp)
4094 return nullptr;
4096 // The element must be coming from the same lane in the source vector
4097 // (although it may have crossed lanes in intermediate shuffles).
4098 if (RootElt != DestElt)
4099 return nullptr;
4101 return RootVec;
4102 }
4104 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
4105 Type *RetTy, const SimplifyQuery &Q,
4106 unsigned MaxRecurse) {
4107 if (isa<UndefValue>(Mask))
4108 return UndefValue::get(RetTy);
4110 Type *InVecTy = Op0->getType();
4111 unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
4112 unsigned InVecNumElts = InVecTy->getVectorNumElements();
4114 SmallVector<int, 32> Indices;
4115 ShuffleVectorInst::getShuffleMask(Mask, Indices);
4116 assert(MaskNumElts == Indices.size() &&
4117 "Size of Indices not same as number of mask elements?");
4119 // Canonicalization: If mask does not select elements from an input vector,
4120 // replace that input vector with undef.
4121 bool MaskSelects0 = false, MaskSelects1 = false;
4122 for (unsigned i = 0; i != MaskNumElts; ++i) {
4123 if (Indices[i] == -1)
4124 continue;
4125 if ((unsigned)Indices[i] < InVecNumElts)
4126 MaskSelects0 = true;
4127 else
4128 MaskSelects1 = true;
4129 }
4130 if (!MaskSelects0)
4131 Op0 = UndefValue::get(InVecTy);
4132 if (!MaskSelects1)
4133 Op1 = UndefValue::get(InVecTy);
4135 auto *Op0Const = dyn_cast<Constant>(Op0);
4136 auto *Op1Const = dyn_cast<Constant>(Op1);
4138 // If all operands are constant, constant fold the shuffle.
4139 if (Op0Const && Op1Const)
4140 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
4142 // Canonicalization: if only one input vector is constant, it shall be the
4143 // second one.
4144 if (Op0Const && !Op1Const) {
4145 std::swap(Op0, Op1);
4146 ShuffleVectorInst::commuteShuffleMask(Indices, InVecNumElts);
4147 }
4149 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
4150 // value type is same as the input vectors' type.
4151 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
4152 if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
4153 OpShuf->getMask()->getSplatValue())
4154 return Op0;
4156 // Don't fold a shuffle with undef mask elements. This may get folded in a
4157 // better way using demanded bits or other analysis.
4158 // TODO: Should we allow this?
4159 if (find(Indices, -1) != Indices.end())
4160 return nullptr;
4162 // Check if every element of this shuffle can be mapped back to the
4163 // corresponding element of a single root vector. If so, we don't need this
4164 // shuffle. This handles simple identity shuffles as well as chains of
4165 // shuffles that may widen/narrow and/or move elements across lanes and back.
4166 Value *RootVec = nullptr;
4167 for (unsigned i = 0; i != MaskNumElts; ++i) {
4168 // Note that recursion is limited for each vector element, so if any element
4169 // exceeds the limit, this will fail to simplify.
4170 RootVec =
4171 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
4173 // We can't replace a widening/narrowing shuffle with one of its operands.
4174 if (!RootVec || RootVec->getType() != RetTy)
4175 return nullptr;
4176 }
4178 return RootVec;
4179 }
4180 /// Given operands for a ShuffleVectorInst, fold the result or return null.
4181 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
4182 Type *RetTy, const SimplifyQuery &Q) {
4183 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
4184 }
4186 static Constant *propagateNaN(Constant *In) {
4187 // If the input is a vector with undef elements, just return a default NaN.
4188 if (!In->isNaN())
4189 return ConstantFP::getNaN(In->getType());
4191 // Propagate the existing NaN constant when possible.
4192 // TODO: Should we quiet a signaling NaN?
4193 return In;
4194 }
4196 static Constant *simplifyFPBinop(Value *Op0, Value *Op1) {
4197 if (isa<UndefValue>(Op0) || isa<UndefValue>(Op1))
4198 return ConstantFP::getNaN(Op0->getType());
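// (Undef is free to be chosen as NaN, and NaN is a legal result of any FP
// binop, so e.g. "fadd double undef, %x" may fold directly to NaN.)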
4200 if (match(Op0, m_NaN()))
4201 return propagateNaN(cast<Constant>(Op0));
4202 if (match(Op1, m_NaN()))
4203 return propagateNaN(cast<Constant>(Op1));
4205 return nullptr;
4206 }
4208 /// Given operands for an FAdd, see if we can fold the result. If not, this
4209 /// returns null.
4210 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4211 const SimplifyQuery &Q, unsigned MaxRecurse) {
4212 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
4213 return C;
4215 if (Constant *C = simplifyFPBinop(Op0, Op1))
4216 return C;
4218 // fadd X, -0 ==> X
4219 if (match(Op1, m_NegZeroFP()))
4220 return Op0;
4222 // fadd X, 0 ==> X, when we know X is not -0
4223 if (match(Op1, m_PosZeroFP()) &&
4224 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4225 return Op0;
4227 // With nnan: (+/-0.0 - X) + X --> 0.0 (and commuted variant)
4228 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
4229 // Negative zeros are allowed because we always end up with positive zero:
4230 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4231 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4232 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
4233 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
4234 if (FMF.noNaNs() && (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
4235 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0)))))
4236 return ConstantFP::getNullValue(Op0->getType());
4238 return nullptr;
4239 }
4241 /// Given operands for an FSub, see if we can fold the result. If not, this
4242 /// returns null.
4243 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4244 const SimplifyQuery &Q, unsigned MaxRecurse) {
4245 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
4246 return C;
4248 if (Constant *C = simplifyFPBinop(Op0, Op1))
4249 return C;
4251 // fsub X, +0 ==> X
4252 if (match(Op1, m_PosZeroFP()))
4253 return Op0;
4255 // fsub X, -0 ==> X, when we know X is not -0
4256 if (match(Op1, m_NegZeroFP()) &&
4257 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4258 return Op0;
4260 // fsub -0.0, (fsub -0.0, X) ==> X
4261 Value *X;
4262 if (match(Op0, m_NegZeroFP()) &&
4263 match(Op1, m_FSub(m_NegZeroFP(), m_Value(X))))
4264 return X;
4266 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
4267 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
4268 match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))))
4269 return X;
4271 // fsub nnan x, x ==> 0.0
4272 if (FMF.noNaNs() && Op0 == Op1)
4273 return Constant::getNullValue(Op0->getType());
4275 return nullptr;
4276 }
4278 /// Given the operands for an FMul, see if we can fold the result
4279 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4280 const SimplifyQuery &Q, unsigned MaxRecurse) {
4281 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
4282 return C;
4284 if (Constant *C = simplifyFPBinop(Op0, Op1))
4285 return C;
4287 // fmul X, 1.0 ==> X
4288 if (match(Op1, m_FPOne()))
4289 return Op0;
4291 // fmul nnan nsz X, 0 ==> 0
4292 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP()))
4293 return ConstantFP::getNullValue(Op0->getType());
4295 // sqrt(X) * sqrt(X) --> X, if we can:
4296 // 1. Remove the intermediate rounding (reassociate).
4297 // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
4298 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
4299 Value *X;
4300 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) &&
4301 FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros())
4302 return X;
4304 return nullptr;
4305 }
4307 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4308 const SimplifyQuery &Q) {
4309 return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit);
4310 }
4313 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4314 const SimplifyQuery &Q) {
4315 return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit);
4316 }
4318 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4319 const SimplifyQuery &Q) {
4320 return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit);
4321 }
4323 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4324 const SimplifyQuery &Q, unsigned) {
4325 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
4326 return C;
4328 if (Constant *C = simplifyFPBinop(Op0, Op1))
4329 return C;
4331 // X / 1.0 -> X
4332 if (match(Op1, m_FPOne()))
4333 return Op0;
4335 // 0 / X -> 0
4336 // Requires that NaNs are off (X could be zero) and signed zeroes are
4337 // ignored (X could be positive or negative, so the output sign is unknown).
4338 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
4339 return ConstantFP::getNullValue(Op0->getType());
4342 // X / X -> 1.0 is legal when NaNs are ignored.
4343 // We can ignore infinities because INF/INF is NaN.
4344 if (FMF.noNaNs() && Op0 == Op1)
4345 return ConstantFP::get(Op0->getType(), 1.0);
4347 // (X * Y) / Y --> X if we can reassociate to the above form.
4348 Value *X;
4349 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
4350 return X;
4352 // -X / X -> -1.0 and
4353 // X / -X -> -1.0 are legal when NaNs are ignored.
4354 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
4355 if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
4356 BinaryOperator::getFNegArgument(Op0) == Op1) ||
4357 (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
4358 BinaryOperator::getFNegArgument(Op1) == Op0))
4359 return ConstantFP::get(Op0->getType(), -1.0);
4361 return nullptr;
4362 }
4365 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4366 const SimplifyQuery &Q) {
4367 return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit);
4368 }
4370 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4371 const SimplifyQuery &Q, unsigned) {
4372 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
4373 return C;
4375 if (Constant *C = simplifyFPBinop(Op0, Op1))
4376 return C;
4378 // Unlike fdiv, the result of frem always matches the sign of the dividend.
4379 // The constant match may include undef elements in a vector, so return a full
4380 // zero constant as the result.
4381 if (FMF.noNaNs()) {
4382 // +0 % X -> 0
4383 if (match(Op0, m_PosZeroFP()))
4384 return ConstantFP::getNullValue(Op0->getType());
4385 // -0 % X -> -0
4386 if (match(Op0, m_NegZeroFP()))
4387 return ConstantFP::getNegativeZero(Op0->getType());
4388 }
4390 return nullptr;
4391 }
4393 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4394 const SimplifyQuery &Q) {
4395 return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit);
4396 }
4398 //=== Helper functions for higher up the class hierarchy.
4400 /// Given operands for a BinaryOperator, see if we can fold the result.
4401 /// If not, this returns null.
4402 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4403 const SimplifyQuery &Q, unsigned MaxRecurse) {
4404 switch (Opcode) {
4405 case Instruction::Add:
4406 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
4407 case Instruction::Sub:
4408 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
4409 case Instruction::Mul:
4410 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
4411 case Instruction::SDiv:
4412 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
4413 case Instruction::UDiv:
4414 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
4415 case Instruction::SRem:
4416 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
4417 case Instruction::URem:
4418 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
4419 case Instruction::Shl:
4420 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
4421 case Instruction::LShr:
4422 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
4423 case Instruction::AShr:
4424 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
4425 case Instruction::And:
4426 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
4427 case Instruction::Or:
4428 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
4429 case Instruction::Xor:
4430 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
4431 case Instruction::FAdd:
4432 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4433 case Instruction::FSub:
4434 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4435 case Instruction::FMul:
4436 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4437 case Instruction::FDiv:
4438 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4439 case Instruction::FRem:
4440 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4441 default:
4442 llvm_unreachable("Unexpected opcode");
4443 }
4444 }
4446 /// Given operands for a BinaryOperator, see if we can fold the result.
4447 /// If not, this returns null.
4448 /// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
4449 /// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp.
4450 static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4451 const FastMathFlags &FMF, const SimplifyQuery &Q,
4452 unsigned MaxRecurse) {
4453 switch (Opcode) {
4454 case Instruction::FAdd:
4455 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
4456 case Instruction::FSub:
4457 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
4458 case Instruction::FMul:
4459 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
4460 case Instruction::FDiv:
4461 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
4462 default:
4463 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
4464 }
4465 }
4467 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4468 const SimplifyQuery &Q) {
4469 return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
4470 }
4472 Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4473 FastMathFlags FMF, const SimplifyQuery &Q) {
4474 return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
4475 }
4477 /// Given operands for a CmpInst, see if we can fold the result.
4478 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4479 const SimplifyQuery &Q, unsigned MaxRecurse) {
4480 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
4481 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
4482 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4483 }
4485 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4486 const SimplifyQuery &Q) {
4487 return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
4488 }
4490 static bool IsIdempotent(Intrinsic::ID ID) {
4491 switch (ID) {
4492 default: return false;
4494 // Unary idempotent: f(f(x)) = f(x)
4495 case Intrinsic::fabs:
4496 case Intrinsic::floor:
4497 case Intrinsic::ceil:
4498 case Intrinsic::trunc:
4499 case Intrinsic::rint:
4500 case Intrinsic::nearbyint:
4501 case Intrinsic::round:
4502 case Intrinsic::canonicalize:
4503 return true;
4504 }
4505 }
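// Try to fold a call to llvm.load.relative(Ptr, Offset): if the 32-bit slot
// at Ptr + Offset holds a constant of the form
//   trunc(sub(ptrtoint(Target), ptrtoint(Ptr)))
// then the intrinsic reconstructs exactly Target, so we can return Target
// without performing the load at run time.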
4507 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
4508 const DataLayout &DL) {
4509 GlobalValue *PtrSym;
4510 APInt PtrOffset;
4511 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
4512 return nullptr;
4514 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
4515 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
4516 Type *Int32PtrTy = Int32Ty->getPointerTo();
4517 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
4519 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
4520 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
4521 return nullptr;
4523 uint64_t OffsetInt = OffsetConstInt->getSExtValue();
4524 if (OffsetInt % 4 != 0)
4525 return nullptr;
4527 Constant *C = ConstantExpr::getGetElementPtr(
4528 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
4529 ConstantInt::get(Int64Ty, OffsetInt / 4));
4530 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
4531 if (!Loaded)
4532 return nullptr;
4534 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
4535 if (!LoadedCE)
4536 return nullptr;
4538 if (LoadedCE->getOpcode() == Instruction::Trunc) {
4539 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
4540 if (!LoadedCE)
4541 return nullptr;
4542 }
4544 if (LoadedCE->getOpcode() != Instruction::Sub)
4545 return nullptr;
4547 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
4548 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
4549 return nullptr;
4550 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
4552 Constant *LoadedRHS = LoadedCE->getOperand(1);
4553 GlobalValue *LoadedRHSSym;
4554 APInt LoadedRHSOffset;
4555 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
4556 DL) ||
4557 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
4558 return nullptr;
4560 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
4561 }
4563 static bool maskIsAllZeroOrUndef(Value *Mask) {
4564 auto *ConstMask = dyn_cast<Constant>(Mask);
4565 if (!ConstMask)
4566 return false;
4567 if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
4568 return true;
4569 for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
4570 ++I) {
4571 if (auto *MaskElt = ConstMask->getAggregateElement(I))
4572 if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
4573 continue;
4574 return false;
4575 }
4576 return true;
4577 }
4579 template <typename IterTy>
4580 static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
4581 const SimplifyQuery &Q, unsigned MaxRecurse) {
4582 Intrinsic::ID IID = F->getIntrinsicID();
4583 unsigned NumOperands = std::distance(ArgBegin, ArgEnd);
4586 if (NumOperands == 1) {
4587 // Perform idempotent optimizations
4588 if (IsIdempotent(IID)) {
4589 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin)) {
4590 if (II->getIntrinsicID() == IID)
4591 return II;
4592 }
4593 }
4595 Value *IIOperand = *ArgBegin;
4596 Value *X;
4597 switch (IID) {
4598 case Intrinsic::fabs: {
4599 if (SignBitMustBeZero(IIOperand, Q.TLI))
4600 return IIOperand;
4601 break;
4602 }
4603 case Intrinsic::bswap: {
4604 // bswap(bswap(x)) -> x
4605 if (match(IIOperand, m_BSwap(m_Value(X))))
4606 return X;
4607 break;
4608 }
4609 case Intrinsic::bitreverse: {
4610 // bitreverse(bitreverse(x)) -> x
4611 if (match(IIOperand, m_BitReverse(m_Value(X))))
4612 return X;
4613 break;
4614 }
4615 case Intrinsic::exp: {
4616 // exp(log(x)) -> x
4617 if (Q.CxtI->hasAllowReassoc() &&
4618 match(IIOperand, m_Intrinsic<Intrinsic::log>(m_Value(X))))
4619 return X;
4620 break;
4621 }
4622 case Intrinsic::exp2: {
4623 // exp2(log2(x)) -> x
4624 if (Q.CxtI->hasAllowReassoc() &&
4625 match(IIOperand, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
4626 return X;
4627 break;
4628 }
4629 case Intrinsic::log: {
4630 // log(exp(x)) -> x
4631 if (Q.CxtI->hasAllowReassoc() &&
4632 match(IIOperand, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
4633 return X;
4634 break;
4635 }
4636 case Intrinsic::log2: {
4637 // log2(exp2(x)) -> x
4638 if (Q.CxtI->hasAllowReassoc() &&
4639 match(IIOperand, m_Intrinsic<Intrinsic::exp2>(m_Value(X)))) {
4640 return X;
4641 }
4642 break;
4643 }
4644 default:
4645 break;
4646 }
4647 }
4650 if (NumOperands == 2) {
4651 Value *LHS = *ArgBegin;
4652 Value *RHS = *(ArgBegin + 1);
4653 Type *ReturnType = F->getReturnType();
4655 switch (IID) {
4656 case Intrinsic::usub_with_overflow:
4657 case Intrinsic::ssub_with_overflow: {
4658 // X - X -> { 0, false }
4659 if (LHS == RHS)
4660 return Constant::getNullValue(ReturnType);
4662 // X - undef -> undef
4663 // undef - X -> undef
4664 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
4665 return UndefValue::get(ReturnType);
4667 break;
4668 }
4669 case Intrinsic::uadd_with_overflow:
4670 case Intrinsic::sadd_with_overflow: {
4671 // X + undef -> undef
4672 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
4673 return UndefValue::get(ReturnType);
4675 break;
4676 }
4677 case Intrinsic::umul_with_overflow:
4678 case Intrinsic::smul_with_overflow: {
4679 // 0 * X -> { 0, false }
4680 // X * 0 -> { 0, false }
4681 if (match(LHS, m_Zero()) || match(RHS, m_Zero()))
4682 return Constant::getNullValue(ReturnType);
4684 // undef * X -> { 0, false }
4685 // X * undef -> { 0, false }
4686 if (match(LHS, m_Undef()) || match(RHS, m_Undef()))
4687 return Constant::getNullValue(ReturnType);
4689 break;
4690 }
4691 case Intrinsic::load_relative: {
4692 Constant *C0 = dyn_cast<Constant>(LHS);
4693 Constant *C1 = dyn_cast<Constant>(RHS);
4694 if (C0 && C1)
4695 return SimplifyRelativeLoad(C0, C1, Q.DL);
4696 return nullptr;
4697 }
4698 case Intrinsic::powi:
4699 if (ConstantInt *Power = dyn_cast<ConstantInt>(RHS)) {
4700 // powi(x, 0) -> 1.0
4701 if (Power->isZero())
4702 return ConstantFP::get(LHS->getType(), 1.0);
4703 // powi(x, 1) -> x
4704 if (Power->isOne())
4705 return LHS;
4706 }
4707 break;
4708 default:
4709 break;
4710 }
4711 }
4713 // Simplify calls to llvm.masked.load.*
4714 switch (IID) {
4715 case Intrinsic::masked_load: {
4716 Value *MaskArg = ArgBegin[2];
4717 Value *PassthruArg = ArgBegin[3];
4718 // If the mask is all zeros or undef, the "passthru" argument is the result.
4719 if (maskIsAllZeroOrUndef(MaskArg))
4720 return PassthruArg;
4721 return nullptr;
4722 }
4723 default:
4724 return nullptr;
4725 }
4726 }
4728 template <typename IterTy>
4729 static Value *SimplifyCall(ImmutableCallSite CS, Value *V, IterTy ArgBegin,
4730 IterTy ArgEnd, const SimplifyQuery &Q,
4731 unsigned MaxRecurse) {
4732 Type *Ty = V->getType();
4733 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
4734 Ty = PTy->getElementType();
4735 FunctionType *FTy = cast<FunctionType>(Ty);
4737 // call undef -> undef
4738 // call null -> undef
4739 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4740 return UndefValue::get(FTy->getReturnType());
4742 Function *F = dyn_cast<Function>(V);
4743 if (!F)
4744 return nullptr;
4746 if (F->isIntrinsic())
4747 if (Value *Ret = SimplifyIntrinsic(F, ArgBegin, ArgEnd, Q, MaxRecurse))
4748 return Ret;
4750 if (!canConstantFoldCallTo(CS, F))
4751 return nullptr;
4753 SmallVector<Constant *, 4> ConstantArgs;
4754 ConstantArgs.reserve(ArgEnd - ArgBegin);
4755 for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
4756 Constant *C = dyn_cast<Constant>(*I);
4757 if (!C)
4758 return nullptr;
4759 ConstantArgs.push_back(C);
4760 }
4762 return ConstantFoldCall(CS, F, ConstantArgs, Q.TLI);
4763 }
4765 Value *llvm::SimplifyCall(ImmutableCallSite CS, Value *V,
4766 User::op_iterator ArgBegin, User::op_iterator ArgEnd,
4767 const SimplifyQuery &Q) {
4768 return ::SimplifyCall(CS, V, ArgBegin, ArgEnd, Q, RecursionLimit);
4769 }
4771 Value *llvm::SimplifyCall(ImmutableCallSite CS, Value *V,
4772 ArrayRef<Value *> Args, const SimplifyQuery &Q) {
4773 return ::SimplifyCall(CS, V, Args.begin(), Args.end(), Q, RecursionLimit);
4774 }
4776 Value *llvm::SimplifyCall(ImmutableCallSite ICS, const SimplifyQuery &Q) {
4777 CallSite CS(const_cast<Instruction*>(ICS.getInstruction()));
4778 return ::SimplifyCall(CS, CS.getCalledValue(), CS.arg_begin(), CS.arg_end(),
4779 Q, RecursionLimit);
4780 }
4782 /// See if we can compute a simplified version of this instruction.
4783 /// If not, this returns null.
4785 Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
4786 OptimizationRemarkEmitter *ORE) {
4787 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
4788 Value *Result;
4790 switch (I->getOpcode()) {
4791 default:
4792 Result = ConstantFoldInstruction(I, Q.DL, Q.TLI);
4793 break;
4794 case Instruction::FAdd:
4795 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
4796 I->getFastMathFlags(), Q);
4798 case Instruction::Add:
4799 Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
4800 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4801 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
4803 case Instruction::FSub:
4804 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
4805 I->getFastMathFlags(), Q);
4807 case Instruction::Sub:
4808 Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
4809 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4810 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
4812 case Instruction::FMul:
4813 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
4814 I->getFastMathFlags(), Q);
4816 case Instruction::Mul:
4817 Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q);
4819 case Instruction::SDiv:
4820 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q);
4822 case Instruction::UDiv:
4823 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q);
4825 case Instruction::FDiv:
4826 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
4827 I->getFastMathFlags(), Q);
4829 case Instruction::SRem:
4830 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q);
4832 case Instruction::URem:
4833 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q);
4835 case Instruction::FRem:
4836 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
4837 I->getFastMathFlags(), Q);
4839 case Instruction::Shl:
4840 Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
4841 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4842 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
4844 case Instruction::LShr:
4845 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
4846 cast<BinaryOperator>(I)->isExact(), Q);
4848 case Instruction::AShr:
4849 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
4850 cast<BinaryOperator>(I)->isExact(), Q);
4852 case Instruction::And:
4853 Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q);
4855 case Instruction::Or:
4856 Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q);
4858 case Instruction::Xor:
4859 Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q);
4861 case Instruction::ICmp:
4862 Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
4863 I->getOperand(0), I->getOperand(1), Q);
4865 case Instruction::FCmp:
4866 Result =
4867 SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0),
4868 I->getOperand(1), I->getFastMathFlags(), Q);
4870 case Instruction::Select:
4871 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
4872 I->getOperand(2), Q);
4874 case Instruction::GetElementPtr: {
4875 SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end());
4876 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
4877 Ops, Q);
4878 break;
4879 }
4880 case Instruction::InsertValue: {
4881 InsertValueInst *IV = cast<InsertValueInst>(I);
4882 Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
4883 IV->getInsertedValueOperand(),
4884 IV->getIndices(), Q);
4887 case Instruction::InsertElement: {
4888 auto *IE = cast<InsertElementInst>(I);
4889 Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1),
4890 IE->getOperand(2), Q);
4893 case Instruction::ExtractValue: {
4894 auto *EVI = cast<ExtractValueInst>(I);
4895 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
4896 EVI->getIndices(), Q);
4899 case Instruction::ExtractElement: {
4900 auto *EEI = cast<ExtractElementInst>(I);
4901 Result = SimplifyExtractElementInst(EEI->getVectorOperand(),
4902 EEI->getIndexOperand(), Q);
4905 case Instruction::ShuffleVector: {
4906 auto *SVI = cast<ShuffleVectorInst>(I);
4907 Result = SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
4908 SVI->getMask(), SVI->getType(), Q);
4911 case Instruction::PHI:
4912 Result = SimplifyPHINode(cast<PHINode>(I), Q);
4914 case Instruction::Call: {
4915 CallSite CS(cast<CallInst>(I));
4916 Result = SimplifyCall(CS, Q);
4917 break;
4918 }
4919 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
4920 #include "llvm/IR/Instruction.def"
4921 #undef HANDLE_CAST_INST
4922 Result =
4923 SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q);
4924 break;
4925 case Instruction::Alloca:
4926 // No simplifications for Alloca and it can't be constant folded.
4927 Result = nullptr;
4928 break;
4929 }
4931 // In general, it is possible for computeKnownBits to determine all bits in a
4932 // value even when the operands are not all constants.
4933 if (!Result && I->getType()->isIntOrIntVectorTy()) {
4934 KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
4935 if (Known.isConstant())
4936 Result = ConstantInt::get(I->getType(), Known.getConstant());
4937 }
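// For example, "or i8 %x, %y" where %x has known one-bits 0x0F and %y has
// known one-bits 0xF0 folds to the constant -1 here, even though neither
// operand is itself a Constant.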
4939 /// If called on unreachable code, the above logic may report that the
4940 /// instruction simplified to itself. Make life easier for users by
4941 /// detecting that case here, returning a safe value instead.
4942 return Result == I ? UndefValue::get(I->getType()) : Result;
4943 }
4945 /// Implementation of recursive simplification through an instruction's
4946 /// uses.
4947 ///
4948 /// This is the common implementation of the recursive simplification routines.
4949 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
4950 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
4951 /// instructions to process and attempt to simplify it using
4952 /// InstructionSimplify.
4954 /// This routine returns 'true' only when *it* simplifies something. The passed
4955 /// in simplified value does not count toward this.
4956 static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
4957 const TargetLibraryInfo *TLI,
4958 const DominatorTree *DT,
4959 AssumptionCache *AC) {
4960 bool Simplified = false;
4961 SmallSetVector<Instruction *, 8> Worklist;
4962 const DataLayout &DL = I->getModule()->getDataLayout();
4964 // If we have an explicit value to collapse to, do that round of the
4965 // simplification loop by hand initially.
4966 if (SimpleV) {
4967 for (User *U : I->users())
4968 if (U != I)
4969 Worklist.insert(cast<Instruction>(U));
4971 // Replace the instruction with its simplified value.
4972 I->replaceAllUsesWith(SimpleV);
4974 // Gracefully handle edge cases where the instruction is not wired into any
4975 // parent block.
4976 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
4977 !I->mayHaveSideEffects())
4978 I->eraseFromParent();
4979 } else {
4980 Worklist.insert(I);
4981 }
4983 // Note that we must test the size on each iteration, the worklist can grow.
4984 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
4985 I = Worklist[Idx];
4987 // See if this instruction simplifies.
4988 SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
4989 if (!SimpleV)
4990 continue;
4992 Simplified = true;
4994 // Stash away all the uses of the old instruction so we can check them for
4995 // recursive simplifications after a RAUW. This is cheaper than checking all
4996 // uses of To on the recursive step in most cases.
4997 for (User *U : I->users())
4998 Worklist.insert(cast<Instruction>(U));
5000 // Replace the instruction with its simplified value.
5001 I->replaceAllUsesWith(SimpleV);
5003 // Gracefully handle edge cases where the instruction is not wired into any
5004 // parent block.
5005 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
5006 !I->mayHaveSideEffects())
5007 I->eraseFromParent();
5008 }
5010 return Simplified;
5011 }
5012 bool llvm::recursivelySimplifyInstruction(Instruction *I,
5013 const TargetLibraryInfo *TLI,
5014 const DominatorTree *DT,
5015 AssumptionCache *AC) {
5016 return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
5017 }
5019 bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
5020 const TargetLibraryInfo *TLI,
5021 const DominatorTree *DT,
5022 AssumptionCache *AC) {
5023 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
5024 assert(SimpleV && "Must provide a simplified value.");
5025 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
5026 }
5028 namespace llvm {
5029 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
5030 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
5031 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
5032 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
5033 auto *TLI = TLIWP ? &TLIWP->getTLI() : nullptr;
5034 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
5035 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
5036 return {F.getParent()->getDataLayout(), TLI, DT, AC};
5037 }
5039 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
5040 const DataLayout &DL) {
5041 return {DL, &AR.TLI, &AR.DT, &AR.AC};
5042 }
5044 template <class T, class... TArgs>
5045 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
5046 Function &F) {
5047 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
5048 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
5049 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
5050 return {F.getParent()->getDataLayout(), TLI, DT, AC};
5051 }
5052 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,