1 //===- InstCombineAndOrXor.cpp --------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the visitAnd, visitOr, and visitXor functions.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombineInternal.h"
15 #include "llvm/Analysis/InstructionSimplify.h"
16 #include "llvm/IR/ConstantRange.h"
17 #include "llvm/IR/Intrinsics.h"
18 #include "llvm/IR/PatternMatch.h"
19 #include "llvm/Transforms/Utils/CmpInstAnalysis.h"
20 #include "llvm/Transforms/Utils/Local.h"
22 using namespace PatternMatch;
24 #define DEBUG_TYPE "instcombine"
26 /// Similar to getICmpCode but for FCmpInst. This encodes a fcmp predicate into
28 static unsigned getFCmpCode(FCmpInst::Predicate CC) {
29 assert(FCmpInst::FCMP_FALSE <= CC && CC <= FCmpInst::FCMP_TRUE &&
30 "Unexpected FCmp predicate!");
31 // Take advantage of the bit pattern of FCmpInst::Predicate here.
33 static_assert(FCmpInst::FCMP_FALSE == 0, ""); // 0 0 0 0
34 static_assert(FCmpInst::FCMP_OEQ == 1, ""); // 0 0 0 1
35 static_assert(FCmpInst::FCMP_OGT == 2, ""); // 0 0 1 0
36 static_assert(FCmpInst::FCMP_OGE == 3, ""); // 0 0 1 1
37 static_assert(FCmpInst::FCMP_OLT == 4, ""); // 0 1 0 0
38 static_assert(FCmpInst::FCMP_OLE == 5, ""); // 0 1 0 1
39 static_assert(FCmpInst::FCMP_ONE == 6, ""); // 0 1 1 0
40 static_assert(FCmpInst::FCMP_ORD == 7, ""); // 0 1 1 1
41 static_assert(FCmpInst::FCMP_UNO == 8, ""); // 1 0 0 0
42 static_assert(FCmpInst::FCMP_UEQ == 9, ""); // 1 0 0 1
43 static_assert(FCmpInst::FCMP_UGT == 10, ""); // 1 0 1 0
44 static_assert(FCmpInst::FCMP_UGE == 11, ""); // 1 0 1 1
45 static_assert(FCmpInst::FCMP_ULT == 12, ""); // 1 1 0 0
46 static_assert(FCmpInst::FCMP_ULE == 13, ""); // 1 1 0 1
47 static_assert(FCmpInst::FCMP_UNE == 14, ""); // 1 1 1 0
48 static_assert(FCmpInst::FCMP_TRUE == 15, ""); // 1 1 1 1
52 /// This is the complement of getICmpCode, which turns an opcode and two
53 /// operands into either a constant true or false, or a brand new ICmp
54 /// instruction. The sign is passed in to determine which kind of predicate to
55 /// use in the new icmp instruction.
56 static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
57 InstCombiner::BuilderTy *Builder) {
58 ICmpInst::Predicate NewPred;
59 if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
61 return Builder->CreateICmp(NewPred, LHS, RHS);
64 /// This is the complement of getFCmpCode, which turns an opcode and two
65 /// operands into either a FCmp instruction, or a true/false constant.
66 static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
67 InstCombiner::BuilderTy *Builder) {
68 const auto Pred = static_cast<FCmpInst::Predicate>(Code);
69 assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE &&
70 "Unexpected FCmp predicate!");
71 if (Pred == FCmpInst::FCMP_FALSE)
72 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
73 if (Pred == FCmpInst::FCMP_TRUE)
74 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
75 return Builder->CreateFCmp(Pred, LHS, RHS);
78 /// \brief Transform BITWISE_OP(BSWAP(A),BSWAP(B)) to BSWAP(BITWISE_OP(A, B))
79 /// \param I Binary operator to transform.
80 /// \return Pointer to node that must replace the original binary operator, or
81 /// null pointer if no transformation was made.
82 Value *InstCombiner::SimplifyBSwap(BinaryOperator &I) {
83 IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
86 if (I.getType()->isVectorTy())
89 // Can only do bitwise ops.
90 if (!I.isBitwiseLogicOp())
93 Value *OldLHS = I.getOperand(0);
94 Value *OldRHS = I.getOperand(1);
95 ConstantInt *ConstLHS = dyn_cast<ConstantInt>(OldLHS);
96 ConstantInt *ConstRHS = dyn_cast<ConstantInt>(OldRHS);
97 IntrinsicInst *IntrLHS = dyn_cast<IntrinsicInst>(OldLHS);
98 IntrinsicInst *IntrRHS = dyn_cast<IntrinsicInst>(OldRHS);
99 bool IsBswapLHS = (IntrLHS && IntrLHS->getIntrinsicID() == Intrinsic::bswap);
100 bool IsBswapRHS = (IntrRHS && IntrRHS->getIntrinsicID() == Intrinsic::bswap);
102 if (!IsBswapLHS && !IsBswapRHS)
105 if (!IsBswapLHS && !ConstLHS)
108 if (!IsBswapRHS && !ConstRHS)
111 /// OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
112 /// OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
113 Value *NewLHS = IsBswapLHS ? IntrLHS->getOperand(0) :
114 Builder->getInt(ConstLHS->getValue().byteSwap());
116 Value *NewRHS = IsBswapRHS ? IntrRHS->getOperand(0) :
117 Builder->getInt(ConstRHS->getValue().byteSwap());
119 Value *BinOp = Builder->CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
120 Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap, ITy);
121 return Builder->CreateCall(F, BinOp);
124 /// This handles expressions of the form ((val OP C1) & C2). Where
125 /// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.
126 Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
129 BinaryOperator &TheAnd) {
130 Value *X = Op->getOperand(0);
131 Constant *Together = nullptr;
133 Together = ConstantExpr::getAnd(AndRHS, OpRHS);
135 switch (Op->getOpcode()) {
137 case Instruction::Xor:
138 if (Op->hasOneUse()) {
139 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
140 Value *And = Builder->CreateAnd(X, AndRHS);
142 return BinaryOperator::CreateXor(And, Together);
145 case Instruction::Or:
146 if (Op->hasOneUse()){
147 ConstantInt *TogetherCI = dyn_cast<ConstantInt>(Together);
148 if (TogetherCI && !TogetherCI->isZero()){
149 // (X | C1) & C2 --> (X & (C2^(C1&C2))) | C1
150 // NOTE: This reduces the number of bits set in the & mask, which
151 // can expose opportunities for store narrowing.
152 Together = ConstantExpr::getXor(AndRHS, Together);
153 Value *And = Builder->CreateAnd(X, Together);
155 return BinaryOperator::CreateOr(And, OpRHS);
160 case Instruction::Add:
161 if (Op->hasOneUse()) {
162 // Adding a one to a single bit bit-field should be turned into an XOR
163 // of the bit. First thing to check is to see if this AND is with a
164 // single bit constant.
165 const APInt &AndRHSV = AndRHS->getValue();
167 // If there is only one bit set.
168 if (AndRHSV.isPowerOf2()) {
169 // Ok, at this point, we know that we are masking the result of the
170 // ADD down to exactly one bit. If the constant we are adding has
171 // no bits set below this bit, then we can eliminate the ADD.
172 const APInt& AddRHS = OpRHS->getValue();
174 // Check to see if any bits below the one bit set in AndRHSV are set.
175 if ((AddRHS & (AndRHSV - 1)).isNullValue()) {
176 // If not, the only thing that can effect the output of the AND is
177 // the bit specified by AndRHSV. If that bit is set, the effect of
178 // the XOR is to toggle the bit. If it is clear, then the ADD has
180 if ((AddRHS & AndRHSV).isNullValue()) { // Bit is not set, noop
181 TheAnd.setOperand(0, X);
184 // Pull the XOR out of the AND.
185 Value *NewAnd = Builder->CreateAnd(X, AndRHS);
186 NewAnd->takeName(Op);
187 return BinaryOperator::CreateXor(NewAnd, AndRHS);
194 case Instruction::Shl: {
195 // We know that the AND will not produce any of the bits shifted in, so if
196 // the anded constant includes them, clear them now!
198 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
199 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
200 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
201 ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShlMask);
203 if (CI->getValue() == ShlMask)
204 // Masking out bits that the shift already masks.
205 return replaceInstUsesWith(TheAnd, Op); // No need for the and.
207 if (CI != AndRHS) { // Reducing bits set in and.
208 TheAnd.setOperand(1, CI);
213 case Instruction::LShr: {
214 // We know that the AND will not produce any of the bits shifted in, so if
215 // the anded constant includes them, clear them now! This only applies to
216 // unsigned shifts, because a signed shr may bring in set bits!
218 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
219 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
220 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
221 ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShrMask);
223 if (CI->getValue() == ShrMask)
224 // Masking out bits that the shift already masks.
225 return replaceInstUsesWith(TheAnd, Op);
228 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
233 case Instruction::AShr:
235 // See if this is shifting in some sign extension, then masking it out
237 if (Op->hasOneUse()) {
238 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
239 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
240 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
241 Constant *C = Builder->getInt(AndRHS->getValue() & ShrMask);
242 if (C == AndRHS) { // Masking out bits shifted in.
243 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
244 // Make the argument unsigned.
245 Value *ShVal = Op->getOperand(0);
246 ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
247 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
255 /// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
256 /// (V < Lo || V >= Hi). This method expects that Lo <= Hi. IsSigned indicates
257 /// whether to treat V, Lo, and Hi as signed or not.
258 Value *InstCombiner::insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
259 bool isSigned, bool Inside) {
260 assert((isSigned ? Lo.sle(Hi) : Lo.ule(Hi)) &&
261 "Lo is not <= Hi in range emission code!");
263 Type *Ty = V->getType();
265 return Inside ? ConstantInt::getFalse(Ty) : ConstantInt::getTrue(Ty);
267 // V >= Min && V < Hi --> V < Hi
268 // V < Min || V >= Hi --> V >= Hi
269 ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
270 if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
271 Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
272 return Builder->CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
275 // V >= Lo && V < Hi --> V - Lo u< Hi - Lo
276 // V < Lo || V >= Hi --> V - Lo u>= Hi - Lo
278 Builder->CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
279 Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
280 return Builder->CreateICmp(Pred, VMinusLo, HiMinusLo);
283 /// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
284 /// that can be simplified.
285 /// One of A and B is considered the mask. The other is the value. This is
286 /// described as the "AMask" or "BMask" part of the enum. If the enum contains
287 /// only "Mask", then both A and B can be considered masks. If A is the mask,
288 /// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
289 /// If both A and C are constants, this proof is also easy.
290 /// For the following explanations, we assume that A is the mask.
292 /// "AllOnes" declares that the comparison is true only if (A & B) == A or all
293 /// bits of A are set in B.
294 /// Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
296 /// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
297 /// bits of A are cleared in B.
298 /// Example: (icmp eq (A & 3), 0) -> Mask_AllZeroes
300 /// "Mixed" declares that (A & B) == C and C might or might not contain any
301 /// number of one bits and zero bits.
302 /// Example: (icmp eq (A & 3), 1) -> AMask_Mixed
304 /// "Not" means that in above descriptions "==" should be replaced by "!=".
305 /// Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
307 /// If the mask A contains a single bit, then the following is equivalent:
308 /// (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
309 /// (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
310 enum MaskedICmpType {
312 AMask_NotAllOnes = 2,
314 BMask_NotAllOnes = 8,
316 Mask_NotAllZeros = 32,
318 AMask_NotMixed = 128,
323 /// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
325 static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
326 ICmpInst::Predicate Pred) {
327 ConstantInt *ACst = dyn_cast<ConstantInt>(A);
328 ConstantInt *BCst = dyn_cast<ConstantInt>(B);
329 ConstantInt *CCst = dyn_cast<ConstantInt>(C);
330 bool IsEq = (Pred == ICmpInst::ICMP_EQ);
331 bool IsAPow2 = (ACst && !ACst->isZero() && ACst->getValue().isPowerOf2());
332 bool IsBPow2 = (BCst && !BCst->isZero() && BCst->getValue().isPowerOf2());
333 unsigned MaskVal = 0;
334 if (CCst && CCst->isZero()) {
335 // if C is zero, then both A and B qualify as mask
336 MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
337 : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
339 MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
340 : (AMask_AllOnes | AMask_Mixed));
342 MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
343 : (BMask_AllOnes | BMask_Mixed));
348 MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
349 : (AMask_NotAllOnes | AMask_NotMixed));
351 MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
352 : (Mask_AllZeros | AMask_Mixed));
353 } else if (ACst && CCst && ConstantExpr::getAnd(ACst, CCst) == CCst) {
354 MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
358 MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
359 : (BMask_NotAllOnes | BMask_NotMixed));
361 MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
362 : (Mask_AllZeros | BMask_Mixed));
363 } else if (BCst && CCst && ConstantExpr::getAnd(BCst, CCst) == CCst) {
364 MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
370 /// Convert an analysis of a masked ICmp into its equivalent if all boolean
371 /// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
372 /// is adjacent to the corresponding normal flag (recording ==), this just
373 /// involves swapping those bits over.
374 static unsigned conjugateICmpMask(unsigned Mask) {
376 NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
377 AMask_Mixed | BMask_Mixed))
380 NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
381 AMask_NotMixed | BMask_NotMixed))
387 /// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
388 /// Return the set of pattern classes (from MaskedICmpType) that both LHS and
390 static unsigned getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C,
391 Value *&D, Value *&E, ICmpInst *LHS,
393 ICmpInst::Predicate &PredL,
394 ICmpInst::Predicate &PredR) {
395 if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType())
397 // vectors are not (yet?) supported
398 if (LHS->getOperand(0)->getType()->isVectorTy())
401 // Here comes the tricky part:
402 // LHS might be of the form L11 & L12 == X, X == L21 & L22,
403 // and L11 & L12 == L21 & L22. The same goes for RHS.
404 // Now we must find those components L** and R**, that are equal, so
405 // that we can extract the parameters A, B, C, D, and E for the canonical
407 Value *L1 = LHS->getOperand(0);
408 Value *L2 = LHS->getOperand(1);
409 Value *L11, *L12, *L21, *L22;
410 // Check whether the icmp can be decomposed into a bit test.
411 if (decomposeBitTestICmp(LHS, PredL, L11, L12, L2)) {
412 L21 = L22 = L1 = nullptr;
414 // Look for ANDs in the LHS icmp.
415 if (!L1->getType()->isIntegerTy()) {
416 // You can icmp pointers, for example. They really aren't masks.
418 } else if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
419 // Any icmp can be viewed as being trivially masked; if it allows us to
420 // remove one, it's worth it.
422 L12 = Constant::getAllOnesValue(L1->getType());
425 if (!L2->getType()->isIntegerTy()) {
426 // You can icmp pointers, for example. They really aren't masks.
428 } else if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
430 L22 = Constant::getAllOnesValue(L2->getType());
434 // Bail if LHS was a icmp that can't be decomposed into an equality.
435 if (!ICmpInst::isEquality(PredL))
438 Value *R1 = RHS->getOperand(0);
439 Value *R2 = RHS->getOperand(1);
442 if (decomposeBitTestICmp(RHS, PredR, R11, R12, R2)) {
443 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
446 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
455 } else if (R1->getType()->isIntegerTy()) {
456 if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
457 // As before, model no mask as a trivial mask if it'll let us do an
460 R12 = Constant::getAllOnesValue(R1->getType());
463 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
468 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
476 // Bail if RHS was a icmp that can't be decomposed into an equality.
477 if (!ICmpInst::isEquality(PredR))
480 // Look for ANDs on the right side of the RHS icmp.
481 if (!Ok && R2->getType()->isIntegerTy()) {
482 if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
484 R12 = Constant::getAllOnesValue(R2->getType());
487 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
492 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
507 } else if (L12 == A) {
510 } else if (L21 == A) {
513 } else if (L22 == A) {
518 unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
519 unsigned RightType = getMaskedICmpType(A, D, E, PredR);
520 return LeftType & RightType;
523 /// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
524 /// into a single (icmp(A & X) ==/!= Y).
525 static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
526 llvm::InstCombiner::BuilderTy *Builder) {
527 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
528 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
530 getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
534 assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
535 "Expected equality predicates for masked type of icmps.");
537 // In full generality:
538 // (icmp (A & B) Op C) | (icmp (A & D) Op E)
539 // == ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
541 // If the latter can be converted into (icmp (A & X) Op Y) then the former is
542 // equivalent to (icmp (A & X) !Op Y).
544 // Therefore, we can pretend for the rest of this function that we're dealing
545 // with the conjunction, provided we flip the sense of any comparisons (both
546 // input and output).
548 // In most cases we're going to produce an EQ for the "&&" case.
549 ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
551 // Convert the masking analysis into its equivalent with negated
553 Mask = conjugateICmpMask(Mask);
556 if (Mask & Mask_AllZeros) {
557 // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
558 // -> (icmp eq (A & (B|D)), 0)
559 Value *NewOr = Builder->CreateOr(B, D);
560 Value *NewAnd = Builder->CreateAnd(A, NewOr);
561 // We can't use C as zero because we might actually handle
562 // (icmp ne (A & B), B) & (icmp ne (A & D), D)
563 // with B and D, having a single bit set.
564 Value *Zero = Constant::getNullValue(A->getType());
565 return Builder->CreateICmp(NewCC, NewAnd, Zero);
567 if (Mask & BMask_AllOnes) {
568 // (icmp eq (A & B), B) & (icmp eq (A & D), D)
569 // -> (icmp eq (A & (B|D)), (B|D))
570 Value *NewOr = Builder->CreateOr(B, D);
571 Value *NewAnd = Builder->CreateAnd(A, NewOr);
572 return Builder->CreateICmp(NewCC, NewAnd, NewOr);
574 if (Mask & AMask_AllOnes) {
575 // (icmp eq (A & B), A) & (icmp eq (A & D), A)
576 // -> (icmp eq (A & (B&D)), A)
577 Value *NewAnd1 = Builder->CreateAnd(B, D);
578 Value *NewAnd2 = Builder->CreateAnd(A, NewAnd1);
579 return Builder->CreateICmp(NewCC, NewAnd2, A);
582 // Remaining cases assume at least that B and D are constant, and depend on
583 // their actual values. This isn't strictly necessary, just a "handle the
584 // easy cases for now" decision.
585 ConstantInt *BCst = dyn_cast<ConstantInt>(B);
588 ConstantInt *DCst = dyn_cast<ConstantInt>(D);
592 if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
593 // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
594 // (icmp ne (A & B), B) & (icmp ne (A & D), D)
595 // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
596 // Only valid if one of the masks is a superset of the other (check "B&D" is
597 // the same as either B or D).
598 APInt NewMask = BCst->getValue() & DCst->getValue();
600 if (NewMask == BCst->getValue())
602 else if (NewMask == DCst->getValue())
606 if (Mask & AMask_NotAllOnes) {
607 // (icmp ne (A & B), B) & (icmp ne (A & D), D)
608 // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
609 // Only valid if one of the masks is a superset of the other (check "B|D" is
610 // the same as either B or D).
611 APInt NewMask = BCst->getValue() | DCst->getValue();
613 if (NewMask == BCst->getValue())
615 else if (NewMask == DCst->getValue())
619 if (Mask & BMask_Mixed) {
620 // (icmp eq (A & B), C) & (icmp eq (A & D), E)
621 // We already know that B & C == C && D & E == E.
622 // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
623 // C and E, which are shared by both the mask B and the mask D, don't
624 // contradict, then we can transform to
625 // -> (icmp eq (A & (B|D)), (C|E))
626 // Currently, we only handle the case of B, C, D, and E being constant.
627 // We can't simply use C and E because we might actually handle
628 // (icmp ne (A & B), B) & (icmp eq (A & D), D)
629 // with B and D, having a single bit set.
630 ConstantInt *CCst = dyn_cast<ConstantInt>(C);
633 ConstantInt *ECst = dyn_cast<ConstantInt>(E);
637 CCst = cast<ConstantInt>(ConstantExpr::getXor(BCst, CCst));
639 ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));
641 // If there is a conflict, we should actually return a false for the
643 if (((BCst->getValue() & DCst->getValue()) &
644 (CCst->getValue() ^ ECst->getValue())).getBoolValue())
645 return ConstantInt::get(LHS->getType(), !IsAnd);
647 Value *NewOr1 = Builder->CreateOr(B, D);
648 Value *NewOr2 = ConstantExpr::getOr(CCst, ECst);
649 Value *NewAnd = Builder->CreateAnd(A, NewOr1);
650 return Builder->CreateICmp(NewCC, NewAnd, NewOr2);
656 /// Try to fold a signed range checked with lower bound 0 to an unsigned icmp.
657 /// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
658 /// If \p Inverted is true then the check is for the inverted range, e.g.
659 /// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
660 Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
662 // Check the lower range comparison, e.g. x >= 0
663 // InstCombine already ensured that if there is a constant it's on the RHS.
664 ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
668 ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
669 Cmp0->getPredicate());
671 // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
672 if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
673 (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
676 ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
677 Cmp1->getPredicate());
679 Value *Input = Cmp0->getOperand(0);
681 if (Cmp1->getOperand(0) == Input) {
682 // For the upper range compare we have: icmp x, n
683 RangeEnd = Cmp1->getOperand(1);
684 } else if (Cmp1->getOperand(1) == Input) {
685 // For the upper range compare we have: icmp n, x
686 RangeEnd = Cmp1->getOperand(0);
687 Pred1 = ICmpInst::getSwappedPredicate(Pred1);
692 // Check the upper range comparison, e.g. x < n
693 ICmpInst::Predicate NewPred;
695 case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
696 case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
697 default: return nullptr;
700 // This simplification is only valid if the upper range is not negative.
701 KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
702 if (!Known.isNonNegative())
706 NewPred = ICmpInst::getInversePredicate(NewPred);
708 return Builder->CreateICmp(NewPred, Input, RangeEnd);
712 foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
714 InstCombiner::BuilderTy *Builder) {
715 Value *X = LHS->getOperand(0);
716 if (X != RHS->getOperand(0))
719 const APInt *C1, *C2;
720 if (!match(LHS->getOperand(1), m_APInt(C1)) ||
721 !match(RHS->getOperand(1), m_APInt(C2)))
724 // We only handle (X != C1 && X != C2) and (X == C1 || X == C2).
725 ICmpInst::Predicate Pred = LHS->getPredicate();
726 if (Pred != RHS->getPredicate())
728 if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
730 if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
733 // The larger unsigned constant goes on the right.
737 APInt Xor = *C1 ^ *C2;
738 if (Xor.isPowerOf2()) {
739 // If LHSC and RHSC differ by only one bit, then set that bit in X and
740 // compare against the larger constant:
741 // (X == C1 || X == C2) --> (X | (C1 ^ C2)) == C2
742 // (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2
743 // We choose an 'or' with a Pow2 constant rather than the inverse mask with
744 // 'and' because that may lead to smaller codegen from a smaller constant.
745 Value *Or = Builder->CreateOr(X, ConstantInt::get(X->getType(), Xor));
746 return Builder->CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2));
749 // Special case: get the ordering right when the values wrap around zero.
750 // Ie, we assumed the constants were unsigned when swapping earlier.
751 if (C1->isNullValue() && C2->isAllOnesValue())
754 if (*C1 == *C2 - 1) {
755 // (X == 13 || X == 14) --> X - 13 <=u 1
756 // (X != 13 && X != 14) --> X - 13 >u 1
757 // An 'add' is the canonical IR form, so favor that over a 'sub'.
758 Value *Add = Builder->CreateAdd(X, ConstantInt::get(X->getType(), -(*C1)));
759 auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
760 return Builder->CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1));
766 /// Fold (icmp)&(icmp) if possible.
767 Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
768 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
770 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
771 if (PredicatesFoldable(PredL, PredR)) {
772 if (LHS->getOperand(0) == RHS->getOperand(1) &&
773 LHS->getOperand(1) == RHS->getOperand(0))
775 if (LHS->getOperand(0) == RHS->getOperand(0) &&
776 LHS->getOperand(1) == RHS->getOperand(1)) {
777 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
778 unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
779 bool isSigned = LHS->isSigned() || RHS->isSigned();
780 return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
784 // handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E)
785 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, true, Builder))
788 // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
789 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false))
792 // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
793 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/false))
796 if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, true, Builder))
799 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
800 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
801 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
802 ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
806 if (LHSC == RHSC && PredL == PredR) {
807 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
808 // where C is a power of 2 or
809 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
810 if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) ||
811 (PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) {
812 Value *NewOr = Builder->CreateOr(LHS0, RHS0);
813 return Builder->CreateICmp(PredL, NewOr, LHSC);
817 // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
818 // where CMAX is the all ones value for the truncated type,
819 // iff the lower bits of C2 and CA are zero.
820 if (PredL == ICmpInst::ICMP_EQ && PredL == PredR && LHS->hasOneUse() &&
823 ConstantInt *AndC, *SmallC = nullptr, *BigC = nullptr;
825 // (trunc x) == C1 & (and x, CA) == C2
826 // (and x, CA) == C2 & (trunc x) == C1
827 if (match(RHS0, m_Trunc(m_Value(V))) &&
828 match(LHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
831 } else if (match(LHS0, m_Trunc(m_Value(V))) &&
832 match(RHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
837 if (SmallC && BigC) {
838 unsigned BigBitSize = BigC->getType()->getBitWidth();
839 unsigned SmallBitSize = SmallC->getType()->getBitWidth();
841 // Check that the low bits are zero.
842 APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
843 if ((Low & AndC->getValue()).isNullValue() &&
844 (Low & BigC->getValue()).isNullValue()) {
845 Value *NewAnd = Builder->CreateAnd(V, Low | AndC->getValue());
846 APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue();
847 Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N);
848 return Builder->CreateICmp(PredL, NewAnd, NewVal);
853 // From here on, we only handle:
854 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
858 // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
859 if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
860 PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
861 PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
862 PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
865 // We can't fold (ugt x, C) & (sgt x, C2).
866 if (!PredicatesFoldable(PredL, PredR))
869 // Ensure that the larger constant is on the RHS.
871 if (CmpInst::isSigned(PredL) ||
872 (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
873 ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
875 ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());
879 std::swap(LHSC, RHSC);
880 std::swap(PredL, PredR);
883 // At this point, we know we have two icmp instructions
884 // comparing a value against two constants and and'ing the result
885 // together. Because of the above check, we know that we only have
886 // icmp eq, icmp ne, icmp [su]lt, and icmp [SU]gt here. We also know
887 // (from the icmp folding check above), that the two constants
888 // are not equal and that the larger constant is on the RHS
889 assert(LHSC != RHSC && "Compares not folded above?");
893 llvm_unreachable("Unknown integer condition code!");
894 case ICmpInst::ICMP_NE:
897 llvm_unreachable("Unknown integer condition code!");
898 case ICmpInst::ICMP_ULT:
899 if (LHSC == SubOne(RHSC)) // (X != 13 & X u< 14) -> X < 13
900 return Builder->CreateICmpULT(LHS0, LHSC);
901 if (LHSC->isNullValue()) // (X != 0 & X u< 14) -> X-1 u< 13
902 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
904 break; // (X != 13 & X u< 15) -> no change
905 case ICmpInst::ICMP_SLT:
906 if (LHSC == SubOne(RHSC)) // (X != 13 & X s< 14) -> X < 13
907 return Builder->CreateICmpSLT(LHS0, LHSC);
908 break; // (X != 13 & X s< 15) -> no change
909 case ICmpInst::ICMP_NE:
910 // Potential folds for this case should already be handled.
914 case ICmpInst::ICMP_UGT:
917 llvm_unreachable("Unknown integer condition code!");
918 case ICmpInst::ICMP_NE:
919 if (RHSC == AddOne(LHSC)) // (X u> 13 & X != 14) -> X u> 14
920 return Builder->CreateICmp(PredL, LHS0, RHSC);
921 break; // (X u> 13 & X != 15) -> no change
922 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
923 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
927 case ICmpInst::ICMP_SGT:
930 llvm_unreachable("Unknown integer condition code!");
931 case ICmpInst::ICMP_NE:
932 if (RHSC == AddOne(LHSC)) // (X s> 13 & X != 14) -> X s> 14
933 return Builder->CreateICmp(PredL, LHS0, RHSC);
934 break; // (X s> 13 & X != 15) -> no change
935 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
936 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), true,
945 /// Optimize (fcmp)&(fcmp). NOTE: Unlike the rest of instcombine, this returns
946 /// a Value which should already be inserted into the function.
947 Value *InstCombiner::foldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
948 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
949 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
950 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
952 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
953 // Swap RHS operands to match LHS.
954 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
955 std::swap(Op1LHS, Op1RHS);
958 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
959 // Suppose the relation between x and y is R, where R is one of
960 // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
961 // testing the desired relations.
963 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
964 // bool(R & CC0) && bool(R & CC1)
965 // = bool((R & CC0) & (R & CC1))
966 // = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
967 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS)
968 return getFCmpValue(getFCmpCode(Op0CC) & getFCmpCode(Op1CC), Op0LHS, Op0RHS,
971 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
972 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
973 if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType())
976 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
977 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
978 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
979 // If either of the constants are nans, then the whole thing returns
981 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
982 return Builder->getFalse();
983 return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
986 // Handle vector zeros. This occurs because the canonical form of
987 // "fcmp ord x,x" is "fcmp ord x, 0".
988 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
989 isa<ConstantAggregateZero>(RHS->getOperand(1)))
990 return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
997 /// Match De Morgan's Laws:
998 /// (~A & ~B) == (~(A | B))
999 /// (~A | ~B) == (~(A & B))
1000 static Instruction *matchDeMorgansLaws(BinaryOperator &I,
1001 InstCombiner::BuilderTy &Builder) {
1002 auto Opcode = I.getOpcode();
1003 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1004 "Trying to match De Morgan's Laws with something other than and/or");
1006 // Flip the logic operation.
1007 Opcode = (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1010 if (match(I.getOperand(0), m_OneUse(m_Not(m_Value(A)))) &&
1011 match(I.getOperand(1), m_OneUse(m_Not(m_Value(B)))) &&
1012 !IsFreeToInvert(A, A->hasOneUse()) &&
1013 !IsFreeToInvert(B, B->hasOneUse())) {
1014 Value *AndOr = Builder.CreateBinOp(Opcode, A, B, I.getName() + ".demorgan");
1015 return BinaryOperator::CreateNot(AndOr);
1021 bool InstCombiner::shouldOptimizeCast(CastInst *CI) {
1022 Value *CastSrc = CI->getOperand(0);
1024 // Noop casts and casts of constants should be eliminated trivially.
1025 if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
1028 // If this cast is paired with another cast that can be eliminated, we prefer
1029 // to have it eliminated.
1030 if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
1031 if (isEliminableCastPair(PrecedingCI, CI))
1034 // If this is a vector sext from a compare, then we don't want to break the
1035 // idiom where each element of the extended vector is either zero or all ones.
1036 if (CI->getOpcode() == Instruction::SExt &&
1037 isa<CmpInst>(CastSrc) && CI->getDestTy()->isVectorTy())
1043 /// Fold {and,or,xor} (cast X), C.
1044 static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
1045 InstCombiner::BuilderTy *Builder) {
1047 if (!match(Logic.getOperand(1), m_Constant(C)))
1050 auto LogicOpc = Logic.getOpcode();
1051 Type *DestTy = Logic.getType();
1052 Type *SrcTy = Cast->getSrcTy();
1054 // If the first operand is bitcast, move the logic operation ahead of the
1055 // bitcast (do the logic operation in the original type). This can eliminate
1056 // bitcasts and allow combines that would otherwise be impeded by the bitcast.
1058 if (match(Cast, m_BitCast(m_Value(X)))) {
1059 Value *NewConstant = ConstantExpr::getBitCast(C, SrcTy);
1060 Value *NewOp = Builder->CreateBinOp(LogicOpc, X, NewConstant);
1061 return CastInst::CreateBitOrPointerCast(NewOp, DestTy);
1064 // Similarly, move the logic operation ahead of a zext if the constant is
1065 // unchanged in the smaller source type. Performing the logic in a smaller
1066 // type may provide more information to later folds, and the smaller logic
1067 // instruction may be cheaper (particularly in the case of vectors).
1068 if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
1069 Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
1070 Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
1071 if (ZextTruncC == C) {
1072 // LogicOpc (zext X), C --> zext (LogicOpc X, C)
1073 Value *NewOp = Builder->CreateBinOp(LogicOpc, X, TruncC);
1074 return new ZExtInst(NewOp, DestTy);
1081 /// Fold {and,or,xor} (cast X), Y.
1082 Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
1083 auto LogicOpc = I.getOpcode();
1084 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");
1086 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1087 CastInst *Cast0 = dyn_cast<CastInst>(Op0);
1091 // This must be a cast from an integer or integer vector source type to allow
1092 // transformation of the logic operation to the source type.
1093 Type *DestTy = I.getType();
1094 Type *SrcTy = Cast0->getSrcTy();
1095 if (!SrcTy->isIntOrIntVectorTy())
1098 if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder))
1101 CastInst *Cast1 = dyn_cast<CastInst>(Op1);
1105 // Both operands of the logic operation are casts. The casts must be of the
1106 // same type for reduction.
1107 auto CastOpcode = Cast0->getOpcode();
1108 if (CastOpcode != Cast1->getOpcode() || SrcTy != Cast1->getSrcTy())
1111 Value *Cast0Src = Cast0->getOperand(0);
1112 Value *Cast1Src = Cast1->getOperand(0);
1114 // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
1115 if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1116 Value *NewOp = Builder->CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
1118 return CastInst::Create(CastOpcode, NewOp, DestTy);
1121 // For now, only 'and'/'or' have optimizations after this.
1122 if (LogicOpc == Instruction::Xor)
1125 // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the
1126 // cast is otherwise not optimizable. This happens for vector sexts.
1127 ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
1128 ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
1129 if (ICmp0 && ICmp1) {
1130 Value *Res = LogicOpc == Instruction::And ? foldAndOfICmps(ICmp0, ICmp1)
1131 : foldOrOfICmps(ICmp0, ICmp1, I);
1133 return CastInst::Create(CastOpcode, Res, DestTy);
1137 // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the
1138 // cast is otherwise not optimizable. This happens for vector sexts.
1139 FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src);
1140 FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src);
1141 if (FCmp0 && FCmp1) {
1142 Value *Res = LogicOpc == Instruction::And ? foldAndOfFCmps(FCmp0, FCmp1)
1143 : foldOrOfFCmps(FCmp0, FCmp1);
1145 return CastInst::Create(CastOpcode, Res, DestTy);
1152 static Instruction *foldBoolSextMaskToSelect(BinaryOperator &I) {
1153 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1155 // Canonicalize SExt or Not to the LHS
1156 if (match(Op1, m_SExt(m_Value())) || match(Op1, m_Not(m_Value()))) {
1157 std::swap(Op0, Op1);
1160 // Fold (and (sext bool to A), B) --> (select bool, B, 0)
1162 if (match(Op0, m_SExt(m_Value(X))) &&
1163 X->getType()->getScalarType()->isIntegerTy(1)) {
1164 Value *Zero = Constant::getNullValue(Op1->getType());
1165 return SelectInst::Create(X, Op1, Zero);
1168 // Fold (and ~(sext bool to A), B) --> (select bool, 0, B)
1169 if (match(Op0, m_Not(m_SExt(m_Value(X)))) &&
1170 X->getType()->getScalarType()->isIntegerTy(1)) {
1171 Value *Zero = Constant::getNullValue(Op0->getType());
1172 return SelectInst::Create(X, Zero, Op1);
1178 static Instruction *foldAndToXor(BinaryOperator &I,
1179 InstCombiner::BuilderTy &Builder) {
1180 assert(I.getOpcode() == Instruction::And);
1181 Value *Op0 = I.getOperand(0);
1182 Value *Op1 = I.getOperand(1);
1185 // Operand complexity canonicalization guarantees that the 'or' is Op0.
1186 // (A | B) & ~(A & B) --> A ^ B
1187 // (A | B) & ~(B & A) --> A ^ B
1188 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
1189 match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B)))))
1190 return BinaryOperator::CreateXor(A, B);
1192 // (A | ~B) & (~A | B) --> ~(A ^ B)
1193 // (A | ~B) & (B | ~A) --> ~(A ^ B)
1194 // (~B | A) & (~A | B) --> ~(A ^ B)
1195 // (~B | A) & (B | ~A) --> ~(A ^ B)
1196 if (match(Op0, m_c_Or(m_Value(A), m_Not(m_Value(B)))) &&
1197 match(Op1, m_c_Or(m_Not(m_Specific(A)), m_Specific(B))))
1198 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1203 static Instruction *foldOrToXor(BinaryOperator &I,
1204 InstCombiner::BuilderTy &Builder) {
1205 assert(I.getOpcode() == Instruction::Or);
1206 Value *Op0 = I.getOperand(0);
1207 Value *Op1 = I.getOperand(1);
1210 // Operand complexity canonicalization guarantees that the 'and' is Op0.
1211 // (A & B) | ~(A | B) --> ~(A ^ B)
1212 // (A & B) | ~(B | A) --> ~(A ^ B)
1213 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1214 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1215 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1217 // (A & ~B) | (~A & B) --> A ^ B
1218 // (A & ~B) | (B & ~A) --> A ^ B
1219 // (~B & A) | (~A & B) --> A ^ B
1220 // (~B & A) | (B & ~A) --> A ^ B
1221 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
1222 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
1223 return BinaryOperator::CreateXor(A, B);
1228 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
1229 // here. We should standardize that construct where it is needed or choose some
1230 // other way to ensure that commutated variants of patterns are not missed.
1231 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
1232 bool Changed = SimplifyAssociativeOrCommutative(I);
1233 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1235 if (Value *V = SimplifyVectorOp(I))
1236 return replaceInstUsesWith(I, V);
1238 if (Value *V = SimplifyAndInst(Op0, Op1, SQ.getWithInstruction(&I)))
1239 return replaceInstUsesWith(I, V);
1241 // See if we can simplify any instructions used by the instruction whose sole
1242 // purpose is to compute bits we don't care about.
1243 if (SimplifyDemandedInstructionBits(I))
1246 // Do this before using distributive laws to catch simple and/or/not patterns.
1247 if (Instruction *Xor = foldAndToXor(I, *Builder))
1250 // (A|B)&(A|C) -> A|(B&C) etc
1251 if (Value *V = SimplifyUsingDistributiveLaws(I))
1252 return replaceInstUsesWith(I, V);
1254 if (Value *V = SimplifyBSwap(I))
1255 return replaceInstUsesWith(I, V);
1257 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
1258 const APInt &AndRHSMask = AndRHS->getValue();
1260 // Optimize a variety of ((val OP C1) & C2) combinations...
1261 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
1262 Value *Op0LHS = Op0I->getOperand(0);
1263 Value *Op0RHS = Op0I->getOperand(1);
1264 switch (Op0I->getOpcode()) {
1266 case Instruction::Xor:
1267 case Instruction::Or: {
1268 // If the mask is only needed on one incoming arm, push it up.
1269 if (!Op0I->hasOneUse()) break;
1271 APInt NotAndRHS(~AndRHSMask);
1272 if (MaskedValueIsZero(Op0LHS, NotAndRHS, 0, &I)) {
1273 // Not masking anything out for the LHS, move to RHS.
1274 Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
1275 Op0RHS->getName()+".masked");
1276 return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
1278 if (!isa<Constant>(Op0RHS) &&
1279 MaskedValueIsZero(Op0RHS, NotAndRHS, 0, &I)) {
1280 // Not masking anything out for the RHS, move to LHS.
1281 Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
1282 Op0LHS->getName()+".masked");
1283 return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
1288 case Instruction::Sub:
1290 if (AndRHSMask.isOneValue() && match(Op0LHS, m_Zero()))
1291 return BinaryOperator::CreateAnd(Op0RHS, AndRHS);
1295 case Instruction::Shl:
1296 case Instruction::LShr:
1297 // (1 << x) & 1 --> zext(x == 0)
1298 // (1 >> x) & 1 --> zext(x == 0)
1299 if (AndRHSMask.isOneValue() && Op0LHS == AndRHS) {
1301 Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
1302 return new ZExtInst(NewICmp, I.getType());
1307 // ((C1 OP zext(X)) & C2) -> zext((C1-X) & C2) if C2 fits in the bitwidth
1308 // of X and OP behaves well when given trunc(C1) and X.
1309 switch (Op0I->getOpcode()) {
1312 case Instruction::Xor:
1313 case Instruction::Or:
1314 case Instruction::Mul:
1315 case Instruction::Add:
1316 case Instruction::Sub:
1319 if (match(Op0I, m_c_BinOp(m_ZExt(m_Value(X)), m_ConstantInt(C1)))) {
1320 if (AndRHSMask.isIntN(X->getType()->getScalarSizeInBits())) {
1321 auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType());
1323 if (isa<ZExtInst>(Op0LHS))
1324 BinOp = Builder->CreateBinOp(Op0I->getOpcode(), X, TruncC1);
1326 BinOp = Builder->CreateBinOp(Op0I->getOpcode(), TruncC1, X);
1327 auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType());
1328 auto *And = Builder->CreateAnd(BinOp, TruncC2);
1329 return new ZExtInst(And, I.getType());
1334 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
1335 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
1339 // If this is an integer truncation, and if the source is an 'and' with
1340 // immediate, transform it. This frequently occurs for bitfield accesses.
1342 Value *X = nullptr; ConstantInt *YC = nullptr;
1343 if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
1344 // Change: and (trunc (and X, YC) to T), C2
1345 // into : and (trunc X to T), trunc(YC) & C2
1346 // This will fold the two constants together, which may allow
1347 // other simplifications.
1348 Value *NewCast = Builder->CreateTrunc(X, I.getType(), "and.shrunk");
1349 Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
1350 C3 = ConstantExpr::getAnd(C3, AndRHS);
1351 return BinaryOperator::CreateAnd(NewCast, C3);
1356 if (isa<Constant>(Op1))
1357 if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
1360 if (Instruction *DeMorgan = matchDeMorgansLaws(I, *Builder))
1364 Value *A = nullptr, *B = nullptr, *C = nullptr;
1365 // A&(A^B) => A & ~B
1367 Value *tmpOp0 = Op0;
1368 Value *tmpOp1 = Op1;
1369 if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_Value(B))))) {
1370 if (A == Op1 || B == Op1 ) {
1377 if (match(tmpOp1, m_OneUse(m_Xor(m_Value(A), m_Value(B))))) {
1381 // Notice that the pattern (A&(~B)) is actually (A&(-1^B)), so if
1382 // A is originally -1 (or a vector of -1 and undefs), then we enter
1383 // an endless loop. By checking that A is non-constant we ensure that
1384 // we will never get to the loop.
1385 if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
1386 return BinaryOperator::CreateAnd(A, Builder->CreateNot(B));
1390 // (A&((~A)|B)) -> A&B
1391 if (match(Op0, m_c_Or(m_Not(m_Specific(Op1)), m_Value(A))))
1392 return BinaryOperator::CreateAnd(A, Op1);
1393 if (match(Op1, m_c_Or(m_Not(m_Specific(Op0)), m_Value(A))))
1394 return BinaryOperator::CreateAnd(A, Op0);
1396 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
1397 if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
1398 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
1399 if (Op1->hasOneUse() || cast<BinaryOperator>(Op1)->hasOneUse())
1400 return BinaryOperator::CreateAnd(Op0, Builder->CreateNot(C));
1402 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
1403 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
1404 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
1405 if (Op0->hasOneUse() || cast<BinaryOperator>(Op0)->hasOneUse())
1406 return BinaryOperator::CreateAnd(Op1, Builder->CreateNot(C));
1408 // (A | B) & ((~A) ^ B) -> (A & B)
1409 // (A | B) & (B ^ (~A)) -> (A & B)
1410 // (B | A) & ((~A) ^ B) -> (A & B)
1411 // (B | A) & (B ^ (~A)) -> (A & B)
1412 if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
1413 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
1414 return BinaryOperator::CreateAnd(A, B);
1416 // ((~A) ^ B) & (A | B) -> (A & B)
1417 // ((~A) ^ B) & (B | A) -> (A & B)
1418 // (B ^ (~A)) & (A | B) -> (A & B)
1419 // (B ^ (~A)) & (B | A) -> (A & B)
1420 if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
1421 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
1422 return BinaryOperator::CreateAnd(A, B);
1426 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
1427 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
1429 if (Value *Res = foldAndOfICmps(LHS, RHS))
1430 return replaceInstUsesWith(I, Res);
1432 // TODO: Make this recursive; it's a little tricky because an arbitrary
1433 // number of 'and' instructions might have to be created.
1435 if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1436 if (auto *Cmp = dyn_cast<ICmpInst>(X))
1437 if (Value *Res = foldAndOfICmps(LHS, Cmp))
1438 return replaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
1439 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1440 if (Value *Res = foldAndOfICmps(LHS, Cmp))
1441 return replaceInstUsesWith(I, Builder->CreateAnd(Res, X));
1443 if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1444 if (auto *Cmp = dyn_cast<ICmpInst>(X))
1445 if (Value *Res = foldAndOfICmps(Cmp, RHS))
1446 return replaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
1447 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1448 if (Value *Res = foldAndOfICmps(Cmp, RHS))
1449 return replaceInstUsesWith(I, Builder->CreateAnd(Res, X));
1453 // If and'ing two fcmp, try combine them into one.
1454 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1455 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1456 if (Value *Res = foldAndOfFCmps(LHS, RHS))
1457 return replaceInstUsesWith(I, Res);
1459 if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
1462 if (Instruction *Select = foldBoolSextMaskToSelect(I))
1465 return Changed ? &I : nullptr;
1468 /// Given an OR instruction, check to see if this is a bswap idiom. If so,
1469 /// insert the new intrinsic and return it.
1470 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
1471 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1473 // Look through zero extends.
1474 if (Instruction *Ext = dyn_cast<ZExtInst>(Op0))
1475 Op0 = Ext->getOperand(0);
1477 if (Instruction *Ext = dyn_cast<ZExtInst>(Op1))
1478 Op1 = Ext->getOperand(0);
1480 // (A | B) | C and A | (B | C) -> bswap if possible.
1481 bool OrOfOrs = match(Op0, m_Or(m_Value(), m_Value())) ||
1482 match(Op1, m_Or(m_Value(), m_Value()));
1484 // (A >> B) | (C << D) and (A << B) | (B >> C) -> bswap if possible.
1485 bool OrOfShifts = match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
1486 match(Op1, m_LogicalShift(m_Value(), m_Value()));
1488 // (A & B) | (C & D) -> bswap if possible.
1489 bool OrOfAnds = match(Op0, m_And(m_Value(), m_Value())) &&
1490 match(Op1, m_And(m_Value(), m_Value()));
1492 if (!OrOfOrs && !OrOfShifts && !OrOfAnds)
1495 SmallVector<Instruction*, 4> Insts;
1496 if (!recognizeBSwapOrBitReverseIdiom(&I, true, false, Insts))
1498 Instruction *LastInst = Insts.pop_back_val();
1499 LastInst->removeFromParent();
1501 for (auto *Inst : Insts)
1506 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
1507 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
1508 unsigned NumElts = C1->getType()->getVectorNumElements();
1509 for (unsigned i = 0; i != NumElts; ++i) {
1510 Constant *EltC1 = C1->getAggregateElement(i);
1511 Constant *EltC2 = C2->getAggregateElement(i);
1512 if (!EltC1 || !EltC2)
1515 // One element must be all ones, and the other must be all zeros.
1516 // FIXME: Allow undef elements.
1517 if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
1518 (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
1524 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or
1525 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
1526 /// B, it can be used as the condition operand of a select instruction.
1527 static Value *getSelectCondition(Value *A, Value *B,
1528 InstCombiner::BuilderTy &Builder) {
1529 // If these are scalars or vectors of i1, A can be used directly.
1530 Type *Ty = A->getType();
1531 if (match(A, m_Not(m_Specific(B))) && Ty->getScalarType()->isIntegerTy(1))
1534 // If A and B are sign-extended, look through the sexts to find the booleans.
1536 if (match(A, m_SExt(m_Value(Cond))) &&
1537 Cond->getType()->getScalarType()->isIntegerTy(1) &&
1538 match(B, m_CombineOr(m_Not(m_SExt(m_Specific(Cond))),
1539 m_SExt(m_Not(m_Specific(Cond))))))
1542 // All scalar (and most vector) possibilities should be handled now.
1543 // Try more matches that only apply to non-splat constant vectors.
1544 if (!Ty->isVectorTy())
1547 // If both operands are constants, see if the constants are inverse bitmasks.
1549 if (match(A, m_Constant(AC)) && match(B, m_Constant(BC)) &&
1550 areInverseVectorBitmasks(AC, BC))
1551 return ConstantExpr::getTrunc(AC, CmpInst::makeCmpResultType(Ty));
1553 // If both operands are xor'd with constants using the same sexted boolean
1554 // operand, see if the constants are inverse bitmasks.
1555 if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AC)))) &&
1556 match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BC)))) &&
1557 Cond->getType()->getScalarType()->isIntegerTy(1) &&
1558 areInverseVectorBitmasks(AC, BC)) {
1559 AC = ConstantExpr::getTrunc(AC, CmpInst::makeCmpResultType(Ty));
1560 return Builder.CreateXor(Cond, AC);
1565 /// We have an expression of the form (A & C) | (B & D). Try to simplify this
1566 /// to "A' ? C : D", where A' is a boolean or vector of booleans.
1567 static Value *matchSelectFromAndOr(Value *A, Value *C, Value *B, Value *D,
1568 InstCombiner::BuilderTy &Builder) {
1569 // The potential condition of the select may be bitcasted. In that case, look
1570 // through its bitcast and the corresponding bitcast of the 'not' condition.
1571 Type *OrigType = A->getType();
1573 if (match(A, m_OneUse(m_BitCast(m_Value(SrcA)))) &&
1574 match(B, m_OneUse(m_BitCast(m_Value(SrcB))))) {
1579 if (Value *Cond = getSelectCondition(A, B, Builder)) {
1580 // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D))
1581 // The bitcasts will either all exist or all not exist. The builder will
1582 // not create unnecessary casts if the types already match.
1583 Value *BitcastC = Builder.CreateBitCast(C, A->getType());
1584 Value *BitcastD = Builder.CreateBitCast(D, A->getType());
1585 Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD);
1586 return Builder.CreateBitCast(Select, OrigType);
1592 /// Fold (icmp)|(icmp) if possible.
1593 Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
1594 Instruction &CxtI) {
1595 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1597 // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
1598 // if K1 and K2 are a one-bit mask.
1599 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
1600 ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
1602 // TODO support vector splats
1603 if (LHS->getPredicate() == ICmpInst::ICMP_EQ && LHSC && LHSC->isZero() &&
1604 RHS->getPredicate() == ICmpInst::ICMP_EQ && RHSC && RHSC->isZero()) {
1606 Value *A, *B, *C, *D;
1607 if (match(LHS->getOperand(0), m_And(m_Value(A), m_Value(B))) &&
1608 match(RHS->getOperand(0), m_And(m_Value(C), m_Value(D)))) {
1609 if (A == D || B == D)
1615 isKnownToBeAPowerOfTwo(B, false, 0, &CxtI) &&
1616 isKnownToBeAPowerOfTwo(D, false, 0, &CxtI)) {
1617 Value *Mask = Builder->CreateOr(B, D);
1618 Value *Masked = Builder->CreateAnd(A, Mask);
1619 return Builder->CreateICmp(ICmpInst::ICMP_NE, Masked, Mask);
1624 // Fold (icmp ult/ule (A + C1), C3) | (icmp ult/ule (A + C2), C3)
1625 // --> (icmp ult/ule ((A & ~(C1 ^ C2)) + max(C1, C2)), C3)
1626 // The original condition actually refers to the following two ranges:
1627 // [MAX_UINT-C1+1, MAX_UINT-C1+1+C3] and [MAX_UINT-C2+1, MAX_UINT-C2+1+C3]
1628 // We can fold these two ranges if:
1629 // 1) C1 and C2 is unsigned greater than C3.
1630 // 2) The two ranges are separated.
1631 // 3) C1 ^ C2 is one-bit mask.
1632 // 4) LowRange1 ^ LowRange2 and HighRange1 ^ HighRange2 are one-bit mask.
1633 // This implies all values in the two ranges differ by exactly one bit.
1635 if ((PredL == ICmpInst::ICMP_ULT || PredL == ICmpInst::ICMP_ULE) &&
1636 PredL == PredR && LHSC && RHSC && LHS->hasOneUse() && RHS->hasOneUse() &&
1637 LHSC->getType() == RHSC->getType() &&
1638 LHSC->getValue() == (RHSC->getValue())) {
1640 Value *LAdd = LHS->getOperand(0);
1641 Value *RAdd = RHS->getOperand(0);
1643 Value *LAddOpnd, *RAddOpnd;
1644 ConstantInt *LAddC, *RAddC;
1645 if (match(LAdd, m_Add(m_Value(LAddOpnd), m_ConstantInt(LAddC))) &&
1646 match(RAdd, m_Add(m_Value(RAddOpnd), m_ConstantInt(RAddC))) &&
1647 LAddC->getValue().ugt(LHSC->getValue()) &&
1648 RAddC->getValue().ugt(LHSC->getValue())) {
1650 APInt DiffC = LAddC->getValue() ^ RAddC->getValue();
1651 if (LAddOpnd == RAddOpnd && DiffC.isPowerOf2()) {
1652 ConstantInt *MaxAddC = nullptr;
1653 if (LAddC->getValue().ult(RAddC->getValue()))
1658 APInt RRangeLow = -RAddC->getValue();
1659 APInt RRangeHigh = RRangeLow + LHSC->getValue();
1660 APInt LRangeLow = -LAddC->getValue();
1661 APInt LRangeHigh = LRangeLow + LHSC->getValue();
1662 APInt LowRangeDiff = RRangeLow ^ LRangeLow;
1663 APInt HighRangeDiff = RRangeHigh ^ LRangeHigh;
1664 APInt RangeDiff = LRangeLow.sgt(RRangeLow) ? LRangeLow - RRangeLow
1665 : RRangeLow - LRangeLow;
1667 if (LowRangeDiff.isPowerOf2() && LowRangeDiff == HighRangeDiff &&
1668 RangeDiff.ugt(LHSC->getValue())) {
1669 Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC);
1671 Value *NewAnd = Builder->CreateAnd(LAddOpnd, MaskC);
1672 Value *NewAdd = Builder->CreateAdd(NewAnd, MaxAddC);
1673 return (Builder->CreateICmp(LHS->getPredicate(), NewAdd, LHSC));
1679 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
1680 if (PredicatesFoldable(PredL, PredR)) {
1681 if (LHS->getOperand(0) == RHS->getOperand(1) &&
1682 LHS->getOperand(1) == RHS->getOperand(0))
1683 LHS->swapOperands();
1684 if (LHS->getOperand(0) == RHS->getOperand(0) &&
1685 LHS->getOperand(1) == RHS->getOperand(1)) {
1686 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
1687 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
1688 bool isSigned = LHS->isSigned() || RHS->isSigned();
1689 return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
1693 // handle (roughly):
1694 // (icmp ne (A & B), C) | (icmp ne (A & D), E)
1695 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, false, Builder))
1698 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
1699 if (LHS->hasOneUse() || RHS->hasOneUse()) {
1700 // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1)
1701 // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1)
1702 Value *A = nullptr, *B = nullptr;
1703 if (PredL == ICmpInst::ICMP_EQ && LHSC && LHSC->isZero()) {
1705 if (PredR == ICmpInst::ICMP_ULT && LHS0 == RHS->getOperand(1))
1707 else if (PredR == ICmpInst::ICMP_UGT && LHS0 == RHS0)
1708 A = RHS->getOperand(1);
1710 // (icmp ult A, B) | (icmp eq B, 0) -> (icmp ule A, B-1)
1711 // (icmp ugt B, A) | (icmp eq B, 0) -> (icmp ule A, B-1)
1712 else if (PredR == ICmpInst::ICMP_EQ && RHSC && RHSC->isZero()) {
1714 if (PredL == ICmpInst::ICMP_ULT && RHS0 == LHS->getOperand(1))
1716 else if (PredL == ICmpInst::ICMP_UGT && LHS0 == RHS0)
1717 A = LHS->getOperand(1);
1720 return Builder->CreateICmp(
1722 Builder->CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A);
1725 // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
1726 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true))
1729 // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
1730 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/true))
1733 if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, false, Builder))
1736 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
1740 if (LHSC == RHSC && PredL == PredR) {
1741 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
1742 if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) {
1743 Value *NewOr = Builder->CreateOr(LHS0, RHS0);
1744 return Builder->CreateICmp(PredL, NewOr, LHSC);
1748 // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
1749 // iff C2 + CA == C1.
1750 if (PredL == ICmpInst::ICMP_ULT && PredR == ICmpInst::ICMP_EQ) {
1752 if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC))))
1753 if (RHSC->getValue() + AddC->getValue() == LHSC->getValue())
1754 return Builder->CreateICmpULE(LHS0, LHSC);
1757 // From here on, we only handle:
1758 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
1762 // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
1763 if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
1764 PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
1765 PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
1766 PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
1769 // We can't fold (ugt x, C) | (sgt x, C2).
1770 if (!PredicatesFoldable(PredL, PredR))
1773 // Ensure that the larger constant is on the RHS.
1775 if (CmpInst::isSigned(PredL) ||
1776 (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
1777 ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
1779 ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());
1782 std::swap(LHS, RHS);
1783 std::swap(LHSC, RHSC);
1784 std::swap(PredL, PredR);
1787 // At this point, we know we have two icmp instructions
1788 // comparing a value against two constants and or'ing the result
1789 // together. Because of the above check, we know that we only have
1790 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
1791 // icmp folding check above), that the two constants are not
1793 assert(LHSC != RHSC && "Compares not folded above?");
1797 llvm_unreachable("Unknown integer condition code!");
1798 case ICmpInst::ICMP_EQ:
1801 llvm_unreachable("Unknown integer condition code!");
1802 case ICmpInst::ICMP_EQ:
1803 // Potential folds for this case should already be handled.
1805 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
1806 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
1810 case ICmpInst::ICMP_ULT:
1813 llvm_unreachable("Unknown integer condition code!");
1814 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
1816 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
1817 assert(!RHSC->isMaxValue(false) && "Missed icmp simplification");
1818 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1,
1822 case ICmpInst::ICMP_SLT:
1825 llvm_unreachable("Unknown integer condition code!");
1826 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
1828 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
1829 assert(!RHSC->isMaxValue(true) && "Missed icmp simplification");
1830 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, true,
1838 /// Optimize (fcmp)|(fcmp). NOTE: Unlike the rest of instcombine, this returns
1839 /// a Value which should already be inserted into the function.
1840 Value *InstCombiner::foldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
1841 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
1842 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
1843 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
1845 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
1846 // Swap RHS operands to match LHS.
1847 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
1848 std::swap(Op1LHS, Op1RHS);
1851 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
1852 // This is a similar transformation to the one in FoldAndOfFCmps.
1854 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1855 // bool(R & CC0) || bool(R & CC1)
1856 // = bool((R & CC0) | (R & CC1))
1857 // = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;)
1858 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS)
1859 return getFCmpValue(getFCmpCode(Op0CC) | getFCmpCode(Op1CC), Op0LHS, Op0RHS,
1862 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
1863 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
1864 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
1865 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
1866 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
1867 // If either of the constants are nans, then the whole thing returns
1869 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
1870 return Builder->getTrue();
1872 // Otherwise, no need to compare the two constants, compare the
1874 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
1877 // Handle vector zeros. This occurs because the canonical form of
1878 // "fcmp uno x,x" is "fcmp uno x, 0".
1879 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
1880 isa<ConstantAggregateZero>(RHS->getOperand(1)))
1881 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
1889 /// This helper function folds:
1891 /// ((A | B) & C1) | (B & C2)
1897 /// when the XOR of the two constants is "all ones" (-1).
1898 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
1899 Value *A, Value *B, Value *C) {
1900 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
1901 if (!CI1) return nullptr;
1903 Value *V1 = nullptr;
1904 ConstantInt *CI2 = nullptr;
1905 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return nullptr;
1907 APInt Xor = CI1->getValue() ^ CI2->getValue();
1908 if (!Xor.isAllOnesValue()) return nullptr;
1910 if (V1 == A || V1 == B) {
1911 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
1912 return BinaryOperator::CreateOr(NewOp, V1);
1918 /// \brief This helper function folds:
1920 /// ((A | B) & C1) ^ (B & C2)
1926 /// when the XOR of the two constants is "all ones" (-1).
1927 Instruction *InstCombiner::FoldXorWithConstants(BinaryOperator &I, Value *Op,
1928 Value *A, Value *B, Value *C) {
1929 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
1933 Value *V1 = nullptr;
1934 ConstantInt *CI2 = nullptr;
1935 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2))))
1938 APInt Xor = CI1->getValue() ^ CI2->getValue();
1939 if (!Xor.isAllOnesValue())
1942 if (V1 == A || V1 == B) {
1943 Value *NewOp = Builder->CreateAnd(V1 == A ? B : A, CI1);
1944 return BinaryOperator::CreateXor(NewOp, V1);
1950 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
1951 // here. We should standardize that construct where it is needed or choose some
1952 // other way to ensure that commutated variants of patterns are not missed.
1953 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
1954 bool Changed = SimplifyAssociativeOrCommutative(I);
1955 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1957 if (Value *V = SimplifyVectorOp(I))
1958 return replaceInstUsesWith(I, V);
1960 if (Value *V = SimplifyOrInst(Op0, Op1, SQ.getWithInstruction(&I)))
1961 return replaceInstUsesWith(I, V);
1963 // See if we can simplify any instructions used by the instruction whose sole
1964 // purpose is to compute bits we don't care about.
1965 if (SimplifyDemandedInstructionBits(I))
1968 // Do this before using distributive laws to catch simple and/or/not patterns.
1969 if (Instruction *Xor = foldOrToXor(I, *Builder))
1972 // (A&B)|(A&C) -> A&(B|C) etc
1973 if (Value *V = SimplifyUsingDistributiveLaws(I))
1974 return replaceInstUsesWith(I, V);
1976 if (Value *V = SimplifyBSwap(I))
1977 return replaceInstUsesWith(I, V);
1979 if (isa<Constant>(Op1))
1980 if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
1983 // Given an OR instruction, check to see if this is a bswap.
1984 if (Instruction *BSwap = MatchBSwap(I))
1990 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
1991 if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
1992 MaskedValueIsZero(Op1, *C, 0, &I)) {
1993 Value *NOr = Builder->CreateOr(A, Op1);
1995 return BinaryOperator::CreateXor(NOr,
1996 ConstantInt::get(NOr->getType(), *C));
1999 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
2000 if (match(Op1, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
2001 MaskedValueIsZero(Op0, *C, 0, &I)) {
2002 Value *NOr = Builder->CreateOr(A, Op0);
2004 return BinaryOperator::CreateXor(NOr,
2005 ConstantInt::get(NOr->getType(), *C));
2011 // ((~A & B) | A) -> (A | B)
2012 if (match(Op0, m_c_And(m_Not(m_Specific(Op1)), m_Value(A))))
2013 return BinaryOperator::CreateOr(A, Op1);
2014 if (match(Op1, m_c_And(m_Not(m_Specific(Op0)), m_Value(A))))
2015 return BinaryOperator::CreateOr(Op0, A);
2017 // ((A & B) | ~A) -> (~A | B)
2018 // The NOT is guaranteed to be in the RHS by complexity ordering.
2019 if (match(Op1, m_Not(m_Value(A))) &&
2020 match(Op0, m_c_And(m_Specific(A), m_Value(B))))
2021 return BinaryOperator::CreateOr(Op1, B);
2024 Value *C = nullptr, *D = nullptr;
2025 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
2026 match(Op1, m_And(m_Value(B), m_Value(D)))) {
2027 Value *V1 = nullptr, *V2 = nullptr;
2028 ConstantInt *C1 = dyn_cast<ConstantInt>(C);
2029 ConstantInt *C2 = dyn_cast<ConstantInt>(D);
2030 if (C1 && C2) { // (A & C1)|(B & C2)
2031 if ((C1->getValue() & C2->getValue()).isNullValue()) {
2032 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
2033 // iff (C1&C2) == 0 and (N&~C1) == 0
2034 if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
2036 MaskedValueIsZero(V2, ~C1->getValue(), 0, &I)) || // (V|N)
2038 MaskedValueIsZero(V1, ~C1->getValue(), 0, &I)))) // (N|V)
2039 return BinaryOperator::CreateAnd(A,
2040 Builder->getInt(C1->getValue()|C2->getValue()));
2041 // Or commutes, try both ways.
2042 if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
2044 MaskedValueIsZero(V2, ~C2->getValue(), 0, &I)) || // (V|N)
2046 MaskedValueIsZero(V1, ~C2->getValue(), 0, &I)))) // (N|V)
2047 return BinaryOperator::CreateAnd(B,
2048 Builder->getInt(C1->getValue()|C2->getValue()));
2050 // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
2051 // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
2052 ConstantInt *C3 = nullptr, *C4 = nullptr;
2053 if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
2054 (C3->getValue() & ~C1->getValue()).isNullValue() &&
2055 match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
2056 (C4->getValue() & ~C2->getValue()).isNullValue()) {
2057 V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
2058 return BinaryOperator::CreateAnd(V2,
2059 Builder->getInt(C1->getValue()|C2->getValue()));
2064 // Don't try to form a select if it's unlikely that we'll get rid of at
2065 // least one of the operands. A select is generally more expensive than the
2066 // 'or' that it is replacing.
2067 if (Op0->hasOneUse() || Op1->hasOneUse()) {
2068 // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
2069 if (Value *V = matchSelectFromAndOr(A, C, B, D, *Builder))
2070 return replaceInstUsesWith(I, V);
2071 if (Value *V = matchSelectFromAndOr(A, C, D, B, *Builder))
2072 return replaceInstUsesWith(I, V);
2073 if (Value *V = matchSelectFromAndOr(C, A, B, D, *Builder))
2074 return replaceInstUsesWith(I, V);
2075 if (Value *V = matchSelectFromAndOr(C, A, D, B, *Builder))
2076 return replaceInstUsesWith(I, V);
2077 if (Value *V = matchSelectFromAndOr(B, D, A, C, *Builder))
2078 return replaceInstUsesWith(I, V);
2079 if (Value *V = matchSelectFromAndOr(B, D, C, A, *Builder))
2080 return replaceInstUsesWith(I, V);
2081 if (Value *V = matchSelectFromAndOr(D, B, A, C, *Builder))
2082 return replaceInstUsesWith(I, V);
2083 if (Value *V = matchSelectFromAndOr(D, B, C, A, *Builder))
2084 return replaceInstUsesWith(I, V);
2087 // ((A|B)&1)|(B&-2) -> (A&1) | B
2088 if (match(A, m_Or(m_Value(V1), m_Specific(B))) ||
2089 match(A, m_Or(m_Specific(B), m_Value(V1)))) {
2090 Instruction *Ret = FoldOrWithConstants(I, Op1, V1, B, C);
2091 if (Ret) return Ret;
2093 // (B&-2)|((A|B)&1) -> (A&1) | B
2094 if (match(B, m_Or(m_Specific(A), m_Value(V1))) ||
2095 match(B, m_Or(m_Value(V1), m_Specific(A)))) {
2096 Instruction *Ret = FoldOrWithConstants(I, Op0, A, V1, D);
2097 if (Ret) return Ret;
2099 // ((A^B)&1)|(B&-2) -> (A&1) ^ B
2100 if (match(A, m_Xor(m_Value(V1), m_Specific(B))) ||
2101 match(A, m_Xor(m_Specific(B), m_Value(V1)))) {
2102 Instruction *Ret = FoldXorWithConstants(I, Op1, V1, B, C);
2103 if (Ret) return Ret;
2105 // (B&-2)|((A^B)&1) -> (A&1) ^ B
2106 if (match(B, m_Xor(m_Specific(A), m_Value(V1))) ||
2107 match(B, m_Xor(m_Value(V1), m_Specific(A)))) {
2108 Instruction *Ret = FoldXorWithConstants(I, Op0, A, V1, D);
2109 if (Ret) return Ret;
2113 // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
2114 // FIXME: The two hasOneUse calls here are the same call, maybe we were
2115 // supposed to check Op1->operand(0)?
2116 if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
2117 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
2118 if (Op1->hasOneUse() || cast<BinaryOperator>(Op1)->hasOneUse())
2119 return BinaryOperator::CreateOr(Op0, C);
2121 // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
2122 // FIXME: The two hasOneUse calls here are the same call, maybe we were
2123 // supposed to check Op0->operand(0)?
2124 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
2125 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
2126 if (Op0->hasOneUse() || cast<BinaryOperator>(Op0)->hasOneUse())
2127 return BinaryOperator::CreateOr(Op1, C);
2129 // ((B | C) & A) | B -> B | (A & C)
2130 if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
2131 return BinaryOperator::CreateOr(Op1, Builder->CreateAnd(A, C));
2133 if (Instruction *DeMorgan = matchDeMorgansLaws(I, *Builder))
2136 // Canonicalize xor to the RHS.
2137 bool SwappedForXor = false;
2138 if (match(Op0, m_Xor(m_Value(), m_Value()))) {
2139 std::swap(Op0, Op1);
2140 SwappedForXor = true;
2143 // A | ( A ^ B) -> A | B
2144 // A | (~A ^ B) -> A | ~B
2145 // (A & B) | (A ^ B)
2146 if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
2147 if (Op0 == A || Op0 == B)
2148 return BinaryOperator::CreateOr(A, B);
2150 if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
2151 match(Op0, m_And(m_Specific(B), m_Specific(A))))
2152 return BinaryOperator::CreateOr(A, B);
2154 if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
2155 Value *Not = Builder->CreateNot(B, B->getName()+".not");
2156 return BinaryOperator::CreateOr(Not, Op0);
2158 if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
2159 Value *Not = Builder->CreateNot(A, A->getName()+".not");
2160 return BinaryOperator::CreateOr(Not, Op0);
2164 // A | ~(A | B) -> A | ~B
2165 // A | ~(A ^ B) -> A | ~B
2166 if (match(Op1, m_Not(m_Value(A))))
2167 if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
2168 if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
2169 Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
2170 B->getOpcode() == Instruction::Xor)) {
2171 Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
2173 Value *Not = Builder->CreateNot(NotOp, NotOp->getName()+".not");
2174 return BinaryOperator::CreateOr(Not, Op0);
2177 // (A & B) | (~A ^ B) -> (~A ^ B)
2178 // (A & B) | (B ^ ~A) -> (~A ^ B)
2179 // (B & A) | (~A ^ B) -> (~A ^ B)
2180 // (B & A) | (B ^ ~A) -> (~A ^ B)
2181 // The match order is important: match the xor first because the 'not'
2182 // operation defines 'A'. We do not need to match the xor as Op0 because the
2183 // xor was canonicalized to Op1 above.
2184 if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
2185 match(Op0, m_c_And(m_Specific(A), m_Specific(B))))
2186 return BinaryOperator::CreateXor(Builder->CreateNot(A), B);
2189 std::swap(Op0, Op1);
2192 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
2193 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
2195 if (Value *Res = foldOrOfICmps(LHS, RHS, I))
2196 return replaceInstUsesWith(I, Res);
2198 // TODO: Make this recursive; it's a little tricky because an arbitrary
2199 // number of 'or' instructions might have to be created.
2201 if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2202 if (auto *Cmp = dyn_cast<ICmpInst>(X))
2203 if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2204 return replaceInstUsesWith(I, Builder->CreateOr(Res, Y));
2205 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2206 if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2207 return replaceInstUsesWith(I, Builder->CreateOr(Res, X));
2209 if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2210 if (auto *Cmp = dyn_cast<ICmpInst>(X))
2211 if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2212 return replaceInstUsesWith(I, Builder->CreateOr(Res, Y));
2213 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2214 if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2215 return replaceInstUsesWith(I, Builder->CreateOr(Res, X));
2219 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
2220 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
2221 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
2222 if (Value *Res = foldOrOfFCmps(LHS, RHS))
2223 return replaceInstUsesWith(I, Res);
2225 if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
2228 // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
2229 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
2230 A->getType()->getScalarType()->isIntegerTy(1))
2231 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1);
2232 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
2233 A->getType()->getScalarType()->isIntegerTy(1))
2234 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0);
2236 // Note: If we've gotten to the point of visiting the outer OR, then the
2237 // inner one couldn't be simplified. If it was a constant, then it won't
2238 // be simplified by a later pass either, so we try swapping the inner/outer
2239 // ORs in the hopes that we'll be able to simplify it this way.
2240 // (X|C) | V --> (X|V) | C
2242 if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
2243 match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
2244 Value *Inner = Builder->CreateOr(A, Op1);
2245 Inner->takeName(Op0);
2246 return BinaryOperator::CreateOr(Inner, C1);
2249 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
2250 // Since this OR statement hasn't been optimized further yet, we hope
2251 // that this transformation will allow the new ORs to be optimized.
2253 Value *X = nullptr, *Y = nullptr;
2254 if (Op0->hasOneUse() && Op1->hasOneUse() &&
2255 match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
2256 match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
2257 Value *orTrue = Builder->CreateOr(A, C);
2258 Value *orFalse = Builder->CreateOr(B, D);
2259 return SelectInst::Create(X, orTrue, orFalse);
2263 return Changed ? &I : nullptr;
2266 /// A ^ B can be specified using other logic ops in a variety of patterns. We
2267 /// can fold these early and efficiently by morphing an existing instruction.
2268 static Instruction *foldXorToXor(BinaryOperator &I) {
2269 assert(I.getOpcode() == Instruction::Xor);
2270 Value *Op0 = I.getOperand(0);
2271 Value *Op1 = I.getOperand(1);
2274 // There are 4 commuted variants for each of the basic patterns.
2276 // (A & B) ^ (A | B) -> A ^ B
2277 // (A & B) ^ (B | A) -> A ^ B
2278 // (A | B) ^ (A & B) -> A ^ B
2279 // (A | B) ^ (B & A) -> A ^ B
2280 if ((match(Op0, m_And(m_Value(A), m_Value(B))) &&
2281 match(Op1, m_c_Or(m_Specific(A), m_Specific(B)))) ||
2282 (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
2283 match(Op1, m_c_And(m_Specific(A), m_Specific(B))))) {
2289 // (A | ~B) ^ (~A | B) -> A ^ B
2290 // (~B | A) ^ (~A | B) -> A ^ B
2291 // (~A | B) ^ (A | ~B) -> A ^ B
2292 // (B | ~A) ^ (A | ~B) -> A ^ B
2293 if ((match(Op0, m_c_Or(m_Value(A), m_Not(m_Value(B)))) &&
2294 match(Op1, m_Or(m_Not(m_Specific(A)), m_Specific(B)))) ||
2295 (match(Op0, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2296 match(Op1, m_Or(m_Specific(A), m_Not(m_Specific(B)))))) {
2302 // (A & ~B) ^ (~A & B) -> A ^ B
2303 // (~B & A) ^ (~A & B) -> A ^ B
2304 // (~A & B) ^ (A & ~B) -> A ^ B
2305 // (B & ~A) ^ (A & ~B) -> A ^ B
2306 if ((match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2307 match(Op1, m_And(m_Not(m_Specific(A)), m_Specific(B)))) ||
2308 (match(Op0, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
2309 match(Op1, m_And(m_Specific(A), m_Not(m_Specific(B)))))) {
2318 Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
2319 if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
2320 if (LHS->getOperand(0) == RHS->getOperand(1) &&
2321 LHS->getOperand(1) == RHS->getOperand(0))
2322 LHS->swapOperands();
2323 if (LHS->getOperand(0) == RHS->getOperand(0) &&
2324 LHS->getOperand(1) == RHS->getOperand(1)) {
2325 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
2326 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
2327 unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
2328 bool isSigned = LHS->isSigned() || RHS->isSigned();
2329 return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
2336 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2337 // here. We should standardize that construct where it is needed or choose some
2338 // other way to ensure that commutated variants of patterns are not missed.
2339 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
2340 bool Changed = SimplifyAssociativeOrCommutative(I);
2341 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2343 if (Value *V = SimplifyVectorOp(I))
2344 return replaceInstUsesWith(I, V);
2346 if (Value *V = SimplifyXorInst(Op0, Op1, SQ.getWithInstruction(&I)))
2347 return replaceInstUsesWith(I, V);
2349 if (Instruction *NewXor = foldXorToXor(I))
2352 // (A&B)^(A&C) -> A&(B^C) etc
2353 if (Value *V = SimplifyUsingDistributiveLaws(I))
2354 return replaceInstUsesWith(I, V);
2356 // See if we can simplify any instructions used by the instruction whose sole
2357 // purpose is to compute bits we don't care about.
2358 if (SimplifyDemandedInstructionBits(I))
2361 if (Value *V = SimplifyBSwap(I))
2362 return replaceInstUsesWith(I, V);
2364 // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
2367 // We must eliminate the and/or (one-use) for these transforms to not increase
2368 // the instruction count.
2369 // ~(~X & Y) --> (X | ~Y)
2370 // ~(Y & ~X) --> (X | ~Y)
2371 if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) {
2372 Value *NotY = Builder->CreateNot(Y, Y->getName() + ".not");
2373 return BinaryOperator::CreateOr(X, NotY);
2375 // ~(~X | Y) --> (X & ~Y)
2376 // ~(Y | ~X) --> (X & ~Y)
2377 if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) {
2378 Value *NotY = Builder->CreateNot(Y, Y->getName() + ".not");
2379 return BinaryOperator::CreateAnd(X, NotY);
2382 // Is this a 'not' (~) fed by a binary operator?
2383 BinaryOperator *NotVal;
2384 if (match(&I, m_Not(m_BinOp(NotVal)))) {
2385 if (NotVal->getOpcode() == Instruction::And ||
2386 NotVal->getOpcode() == Instruction::Or) {
2387 // Apply DeMorgan's Law when inverts are free:
2388 // ~(X & Y) --> (~X | ~Y)
2389 // ~(X | Y) --> (~X & ~Y)
2390 if (IsFreeToInvert(NotVal->getOperand(0),
2391 NotVal->getOperand(0)->hasOneUse()) &&
2392 IsFreeToInvert(NotVal->getOperand(1),
2393 NotVal->getOperand(1)->hasOneUse())) {
2394 Value *NotX = Builder->CreateNot(NotVal->getOperand(0), "notlhs");
2395 Value *NotY = Builder->CreateNot(NotVal->getOperand(1), "notrhs");
2396 if (NotVal->getOpcode() == Instruction::And)
2397 return BinaryOperator::CreateOr(NotX, NotY);
2398 return BinaryOperator::CreateAnd(NotX, NotY);
2402 // ~(~X >>s Y) --> (X >>s Y)
2403 if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
2404 return BinaryOperator::CreateAShr(X, Y);
2406 // If we are inverting a right-shifted constant, we may be able to eliminate
2407 // the 'not' by inverting the constant and using the opposite shift type.
2408 // Canonicalization rules ensure that only a negative constant uses 'ashr',
2409 // but we must check that in case that transform has not fired yet.
2411 if (match(NotVal, m_AShr(m_APInt(C), m_Value(Y))) && C->isNegative()) {
2412 // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
2413 Constant *NotC = ConstantInt::get(I.getType(), ~(*C));
2414 return BinaryOperator::CreateLShr(NotC, Y);
2417 if (match(NotVal, m_LShr(m_APInt(C), m_Value(Y))) && C->isNonNegative()) {
2418 // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
2419 Constant *NotC = ConstantInt::get(I.getType(), ~(*C));
2420 return BinaryOperator::CreateAShr(NotC, Y);
2424 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
2425 ICmpInst::Predicate Pred;
2426 if (match(Op0, m_OneUse(m_Cmp(Pred, m_Value(), m_Value()))) &&
2427 match(Op1, m_AllOnes())) {
2428 cast<CmpInst>(Op0)->setPredicate(CmpInst::getInversePredicate(Pred));
2429 return replaceInstUsesWith(I, Op0);
2432 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) {
2433 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
2434 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
2435 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
2436 if (CI->hasOneUse() && Op0C->hasOneUse()) {
2437 Instruction::CastOps Opcode = Op0C->getOpcode();
2438 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
2439 (RHSC == ConstantExpr::getCast(Opcode, Builder->getTrue(),
2440 Op0C->getDestTy()))) {
2441 CI->setPredicate(CI->getInversePredicate());
2442 return CastInst::Create(Opcode, CI, Op0C->getType());
2448 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2449 // ~(c-X) == X-c-1 == X+(-c-1)
2450 if (Op0I->getOpcode() == Instruction::Sub && RHSC->isAllOnesValue())
2451 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
2452 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
2453 return BinaryOperator::CreateAdd(Op0I->getOperand(1),
2457 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
2458 if (Op0I->getOpcode() == Instruction::Add) {
2459 // ~(X-c) --> (-c-1)-X
2460 if (RHSC->isAllOnesValue()) {
2461 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
2462 return BinaryOperator::CreateSub(SubOne(NegOp0CI),
2463 Op0I->getOperand(0));
2464 } else if (RHSC->getValue().isSignMask()) {
2465 // (X + C) ^ signmask -> (X + C + signmask)
2466 Constant *C = Builder->getInt(RHSC->getValue() + Op0CI->getValue());
2467 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
2470 } else if (Op0I->getOpcode() == Instruction::Or) {
2471 // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
2472 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue(),
2474 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHSC);
2475 // Anything in both C1 and C2 is known to be zero, remove it from
2477 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHSC);
2478 NewRHS = ConstantExpr::getAnd(NewRHS,
2479 ConstantExpr::getNot(CommonBits));
2481 I.setOperand(0, Op0I->getOperand(0));
2482 I.setOperand(1, NewRHS);
2485 } else if (Op0I->getOpcode() == Instruction::LShr) {
2486 // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
2490 if (Op0I->hasOneUse() &&
2491 (E1 = dyn_cast<BinaryOperator>(Op0I->getOperand(0))) &&
2492 E1->getOpcode() == Instruction::Xor &&
2493 (C1 = dyn_cast<ConstantInt>(E1->getOperand(1)))) {
2494 // fold (C1 >> C2) ^ C3
2495 ConstantInt *C2 = Op0CI, *C3 = RHSC;
2496 APInt FoldConst = C1->getValue().lshr(C2->getValue());
2497 FoldConst ^= C3->getValue();
2498 // Prepare the two operands.
2499 Value *Opnd0 = Builder->CreateLShr(E1->getOperand(0), C2);
2500 Opnd0->takeName(Op0I);
2501 cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc());
2502 Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst);
2504 return BinaryOperator::CreateXor(Opnd0, FoldVal);
2511 if (isa<Constant>(Op1))
2512 if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
2517 if (match(Op1, m_OneUse(m_Or(m_Value(A), m_Value(B))))) {
2518 if (A == Op0) { // A^(A|B) == A^(B|A)
2519 cast<BinaryOperator>(Op1)->swapOperands();
2522 if (B == Op0) { // A^(B|A) == (B|A)^A
2523 I.swapOperands(); // Simplified below.
2524 std::swap(Op0, Op1);
2526 } else if (match(Op1, m_OneUse(m_And(m_Value(A), m_Value(B))))) {
2527 if (A == Op0) { // A^(A&B) -> A^(B&A)
2528 cast<BinaryOperator>(Op1)->swapOperands();
2531 if (B == Op0) { // A^(B&A) -> (B&A)^A
2532 I.swapOperands(); // Simplified below.
2533 std::swap(Op0, Op1);
2540 if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B))))) {
2541 if (A == Op1) // (B|A)^B == (A|B)^B
2543 if (B == Op1) // (A|B)^B == A & ~B
2544 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1));
2545 } else if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B))))) {
2546 if (A == Op1) // (A&B)^A -> (B&A)^A
2549 if (B == Op1 && // (B&A)^A == ~B & A
2550 !match(Op1, m_APInt(C))) { // Canonical form is (B&C)^C
2551 return BinaryOperator::CreateAnd(Builder->CreateNot(A), Op1);
2557 Value *A, *B, *C, *D;
2558 // (A ^ C)^(A | B) -> ((~A) & B) ^ C
2559 if (match(Op0, m_Xor(m_Value(D), m_Value(C))) &&
2560 match(Op1, m_Or(m_Value(A), m_Value(B)))) {
2562 return BinaryOperator::CreateXor(
2563 Builder->CreateAnd(Builder->CreateNot(A), B), C);
2565 return BinaryOperator::CreateXor(
2566 Builder->CreateAnd(Builder->CreateNot(B), A), C);
2568 // (A | B)^(A ^ C) -> ((~A) & B) ^ C
2569 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
2570 match(Op1, m_Xor(m_Value(D), m_Value(C)))) {
2572 return BinaryOperator::CreateXor(
2573 Builder->CreateAnd(Builder->CreateNot(A), B), C);
2575 return BinaryOperator::CreateXor(
2576 Builder->CreateAnd(Builder->CreateNot(B), A), C);
2578 // (A & B) ^ (A ^ B) -> (A | B)
2579 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2580 match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
2581 return BinaryOperator::CreateOr(A, B);
2582 // (A ^ B) ^ (A & B) -> (A | B)
2583 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2584 match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
2585 return BinaryOperator::CreateOr(A, B);
2588 // (A & ~B) ^ ~A -> ~(A & B)
2589 // (~B & A) ^ ~A -> ~(A & B)
2591 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2592 match(Op1, m_Not(m_Specific(A))))
2593 return BinaryOperator::CreateNot(Builder->CreateAnd(A, B));
2595 if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
2596 if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
2597 if (Value *V = foldXorOfICmps(LHS, RHS))
2598 return replaceInstUsesWith(I, V);
2600 if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
2603 return Changed ? &I : nullptr;