From 771c4b9eb5b9882993f82cbfd4d433e698e07f62 Mon Sep 17 00:00:00 2001
From: Hans Wennborg
Date: Tue, 3 Oct 2017 18:32:29 +0000
Subject: [PATCH] Revert r314806 "[SLP] Vectorize jumbled memory loads."

All the buildbots are red, e.g.
http://lab.llvm.org:8011/builders/clang-cmake-aarch64-lld/builds/2436/

> Summary:
> This patch tries to vectorize loads of consecutive memory accesses, accessed
> in non-consecutive or jumbled way. An earlier attempt was made with patch D26905
> which was reverted back due to some basic issue with representing the 'use mask'
> of jumbled accesses.
>
> This patch fixes the mask representation by recording the 'use mask' in the
> usertree entry.
>
> Change-Id: I9fe7f5045f065d84c126fa307ef6ebe0787296df
>
> Reviewers: mkuper, loladiro, Ayal, zvi, danielcdh
>
> Reviewed By: Ayal
>
> Subscribers: hans, mzolotukhin
>
> Differential Revision: https://reviews.llvm.org/D36130

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@314824 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Analysis/LoopAccessAnalysis.h         |  15 --
 lib/Analysis/LoopAccessAnalysis.cpp                |  71 ------
 lib/Transforms/Vectorize/SLPVectorizer.cpp         | 269 +++++++--------------
 .../SLPVectorizer/X86/jumbled-load-multiuse.ll     |  24 +-
 .../X86/jumbled-load-shuffle-placement.ll          |  68 ------
 test/Transforms/SLPVectorizer/X86/jumbled-load.ll  |  37 +--
 test/Transforms/SLPVectorizer/X86/store-jumbled.ll |  25 +-
 7 files changed, 136 insertions(+), 373 deletions(-)
 delete mode 100644 test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll

diff --git a/include/llvm/Analysis/LoopAccessAnalysis.h b/include/llvm/Analysis/LoopAccessAnalysis.h
index 54f151ef82e..28154c873b7 100644
--- a/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -667,21 +667,6 @@ int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
                      const ValueToValueMap &StridesMap = ValueToValueMap(),
                      bool Assume = false, bool ShouldCheckWrap = true);
 
-/// \brief Attempt to sort the 'loads' in \p VL and return the sorted values in
-/// \p Sorted.
-///
-/// Returns 'false' if sorting is not legal or feasible, otherwise returns
-/// 'true'. If \p Mask is not null, it also returns the \p Mask which is the
-/// shuffle mask for actual memory access order.
-///
-/// For example, for a given VL of memory accesses in program order, a[i+2],
-/// a[i+0], a[i+1] and a[i+3], this function will sort the VL and save the
-/// sorted value in 'Sorted' as a[i+0], a[i+1], a[i+2], a[i+3] and saves the
-/// mask for actual memory accesses in program order in 'Mask' as <2,0,1,3>
-bool sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
-                      ScalarEvolution &SE, SmallVectorImpl<Value *> &Sorted,
-                      SmallVectorImpl<unsigned> *Mask = nullptr);
-
 /// \brief Returns true if the memory operations \p A and \p B are consecutive.
 /// This is a simple API that does not depend on the analysis pass.
 bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp
index 359e0efbfc1..eb633196d33 100644
--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1107,77 +1107,6 @@ static unsigned getAddressSpaceOperand(Value *I) {
   return -1;
 }
 
-// TODO: This API can be improved by using the permutation of given width as
-// the accesses are entered into the map.
-bool llvm::sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
-                            ScalarEvolution &SE,
-                            SmallVectorImpl<Value *> &Sorted,
-                            SmallVectorImpl<unsigned> *Mask) {
-  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
-  OffValPairs.reserve(VL.size());
-  Sorted.reserve(VL.size());
-
-  // Walk over the pointers, and map each of them to an offset relative to
-  // first pointer in the array.
-  Value *Ptr0 = getPointerOperand(VL[0]);
-  const SCEV *Scev0 = SE.getSCEV(Ptr0);
-  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
-  PointerType *PtrTy = dyn_cast<PointerType>(Ptr0->getType());
-  uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
-
-  for (auto *Val : VL) {
-    // The only kind of access we care about here is load.
-    if (!isa<LoadInst>(Val))
-      return false;
-
-    Value *Ptr = getPointerOperand(Val);
-    assert(Ptr && "Expected value to have a pointer operand.");
-    // If a pointer refers to a different underlying object, bail - the
-    // pointers are by definition incomparable.
-    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
-    if (CurrObj != Obj0)
-      return false;
-
-    const SCEVConstant *Diff =
-        dyn_cast<SCEVConstant>(SE.getMinusSCEV(SE.getSCEV(Ptr), Scev0));
-    // The pointers may not have a constant offset from each other, or SCEV
-    // may just not be smart enough to figure out they do. Regardless,
-    // there's nothing we can do.
-    if (!Diff || static_cast<unsigned>(Diff->getAPInt().abs().getSExtValue()) >
-                     (VL.size() - 1) * Size)
-      return false;
-
-    OffValPairs.emplace_back(Diff->getAPInt().getSExtValue(), Val);
-  }
-  SmallVector<unsigned, 4> UseOrder(VL.size());
-  for (unsigned i = 0; i < VL.size(); i++) {
-    UseOrder[i] = i;
-  }
-
-  // Sort the memory accesses and keep the order of their uses in UseOrder.
-  std::sort(UseOrder.begin(), UseOrder.end(),
-            [&OffValPairs](unsigned Left, unsigned Right) {
-              return OffValPairs[Left].first < OffValPairs[Right].first;
-            });
-
-  for (unsigned i = 0; i < VL.size(); i++)
-    Sorted.emplace_back(OffValPairs[UseOrder[i]].second);
-
-  // Sort UseOrder to compute the Mask.
-  if (Mask) {
-    Mask->reserve(VL.size());
-    for (unsigned i = 0; i < VL.size(); i++)
-      Mask->emplace_back(i);
-    std::sort(Mask->begin(), Mask->end(),
-              [&UseOrder](unsigned Left, unsigned Right) {
-                return UseOrder[Left] < UseOrder[Right];
-              });
-  }
-
-  return true;
-}
-
-
 /// Returns true if the memory operations \p A and \p B are consecutive.
 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                                ScalarEvolution &SE, bool CheckType) {
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 4f4beeb81c3..d201387debd 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -637,23 +637,17 @@ private:
   int getEntryCost(TreeEntry *E);
 
   /// This is the recursive part of buildTree.
-  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int UserIndx = -1,
-                     int OpdNum = 0);
+  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int);
 
   /// \returns True if the ExtractElement/ExtractValue instructions in VL can
   /// be vectorized to use the original vector (or aggregate "bitcast" to a
   /// vector).
   bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const;
 
-  /// Vectorize a single entry in the tree. \p OpdNum indicates the ordinality
-  /// of the operand corresponding to this tree entry \p E for the user tree
-  /// entry indicated by \p UserIndx.
-  // In other words, "E == TreeEntry[UserIndx].getOperand(OpdNum)".
-  Value *vectorizeTree(TreeEntry *E, int OpdNum = 0, int UserIndx = -1);
+  /// Vectorize a single entry in the tree.
+  Value *vectorizeTree(TreeEntry *E);
 
-  /// Vectorize a single entry in the tree, starting in \p VL. \p OpdNum
-  /// indicates the ordinality of the operand corresponding to the \p VL of
-  /// scalar values for the user indicated by \p UserIndx this \p VL feeds into.
-  Value *vectorizeTree(ArrayRef<Value *> VL, int OpdNum = 0, int UserIndx = -1);
+  /// Vectorize a single entry in the tree, starting in \p VL.
+  Value *vectorizeTree(ArrayRef<Value *> VL);
 
   /// \returns the pointer to the vectorized value if \p VL is already
   /// vectorized, or NULL. They may happen in cycles.
@@ -691,7 +685,7 @@ private:
                                              SmallVectorImpl<Value *> &Left,
                                              SmallVectorImpl<Value *> &Right);
   struct TreeEntry {
-    TreeEntry(std::vector<TreeEntry> &Container) : ShuffleMask(), Container(Container) {}
+    TreeEntry(std::vector<TreeEntry> &Container) : Container(Container) {}
 
     /// \returns true if the scalars in VL are equal to this entry.
     bool isSame(ArrayRef<Value *> VL) const {
@@ -699,16 +693,6 @@ private:
       return std::equal(VL.begin(), VL.end(), Scalars.begin());
     }
 
-    /// \returns true if the scalars in VL are found in this tree entry.
-    bool isFoundJumbled(ArrayRef<Value *> VL, const DataLayout &DL,
-                        ScalarEvolution &SE) const {
-      assert(VL.size() == Scalars.size() && "Invalid size");
-      SmallVector<Value *, 8> List;
-      if (!sortLoadAccesses(VL, DL, SE, List))
-        return false;
-      return std::equal(List.begin(), List.end(), Scalars.begin());
-    }
-
     /// A vector of scalars.
     ValueList Scalars;
 
@@ -718,14 +702,6 @@ private:
     /// Do we need to gather this sequence ?
     bool NeedToGather = false;
 
-    /// Records optional shuffle mask for the uses of jumbled memory accesses.
-    /// For example, a non-empty ShuffleMask[1] represents the permutation of
-    /// lanes that operand #1 of this vectorized instruction should undergo
-    /// before feeding this vectorized instruction, whereas an empty
-    /// ShuffleMask[0] indicates that the lanes of operand #0 of this
-    /// vectorized instruction need not be permuted at all.
-    SmallVector<unsigned, 4> ShuffleMask[3];
-
     /// Points back to the VectorizableTree.
     ///
     /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
@@ -741,25 +717,12 @@ private:
 
   /// Create a new VectorizableTree entry.
   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
-                          int &UserTreeIdx, const InstructionsState &S,
-                          ArrayRef<unsigned> ShuffleMask = None,
-                          int OpdNum = 0) {
-    assert((!Vectorized || S.Opcode != 0) &&
-           "Vectorized TreeEntry without opcode");
+                          int &UserTreeIdx) {
     VectorizableTree.emplace_back(VectorizableTree);
-
     int idx = VectorizableTree.size() - 1;
     TreeEntry *Last = &VectorizableTree[idx];
     Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
     Last->NeedToGather = !Vectorized;
-
-    TreeEntry *UserEntry = &VectorizableTree[UserTreeIdx];
-    if (!ShuffleMask.empty()) {
-      assert(UserEntry->ShuffleMask[OpdNum].empty() && "Mask already present!");
-      UserEntry->ShuffleMask[OpdNum].insert(
-          UserEntry->ShuffleMask[OpdNum].begin(), ShuffleMask.begin(),
-          ShuffleMask.end());
-    }
     if (Vectorized) {
       for (int i = 0, e = VL.size(); i != e; ++i) {
         assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
@@ -1410,34 +1373,34 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
 }
 
 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
-                            int UserTreeIdx, int OpdNum) {
+                            int UserTreeIdx) {
   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
 
   InstructionsState S = getSameOpcode(VL);
   if (Depth == RecursionMaxDepth) {
     DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
-    newTreeEntry(VL, false, UserTreeIdx, S);
+    newTreeEntry(VL, false, UserTreeIdx);
     return;
   }
 
   // Don't handle vectors.
  if (S.OpValue->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
-    newTreeEntry(VL, false, UserTreeIdx, S);
+    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }
 
  if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
-      newTreeEntry(VL, false, UserTreeIdx, S);
+      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
 
  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
-    newTreeEntry(VL, false, UserTreeIdx, S);
+    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }
 
@@ -1449,7 +1412,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is ephemeral.\n");
-      newTreeEntry(VL, false, UserTreeIdx, S);
+      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }
@@ -1460,7 +1423,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
-        newTreeEntry(VL, false, UserTreeIdx, S);
+        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }
@@ -1479,7 +1442,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
      if (getTreeEntry(I)) {
        DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
              ") is already in tree.\n");
-        newTreeEntry(VL, false, UserTreeIdx, S);
+        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
  }
@@ -1489,7 +1452,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
-      newTreeEntry(VL, false, UserTreeIdx, S);
+      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }
@@ -1503,7 +1466,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); - newTreeEntry(VL, false, UserTreeIdx, S); + newTreeEntry(VL, false, UserTreeIdx); return; } @@ -1512,7 +1475,7 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth, for (unsigned j = i + 1; j < e; ++j) if (VL[i] == VL[j]) { DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); - newTreeEntry(VL, false, UserTreeIdx, S); + newTreeEntry(VL, false, UserTreeIdx); return; } @@ -1527,7 +1490,7 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth, assert((!BS.getScheduleData(VL0) || !BS.getScheduleData(VL0)->isPartOfBundle()) && "tryScheduleBundle should cancelScheduling on failure"); - newTreeEntry(VL, false, UserTreeIdx, S); + newTreeEntry(VL, false, UserTreeIdx); return; } DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); @@ -1546,12 +1509,12 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth, if (Term) { DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n"); BS.cancelScheduling(VL, VL0); - newTreeEntry(VL, false, UserTreeIdx, S); + newTreeEntry(VL, false, UserTreeIdx); return; } } - newTreeEntry(VL, true, UserTreeIdx, S); + newTreeEntry(VL, true, UserTreeIdx); DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { @@ -1561,7 +1524,7 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth, Operands.push_back(cast(j)->getIncomingValueForBlock( PH->getIncomingBlock(i))); - buildTree_rec(Operands, Depth + 1, UserTreeIdx, i); + buildTree_rec(Operands, Depth + 1, UserTreeIdx); } return; } @@ -1573,7 +1536,7 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth, } else { BS.cancelScheduling(VL, VL0); } - newTreeEntry(VL, Reuse, UserTreeIdx, S); + newTreeEntry(VL, Reuse, UserTreeIdx); return; } case Instruction::Load: { @@ -1588,7 +1551,7 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth, if (DL->getTypeSizeInBits(ScalarTy) != DL->getTypeAllocSizeInBits(ScalarTy)) { BS.cancelScheduling(VL, VL0); - newTreeEntry(VL, false, UserTreeIdx, S); + newTreeEntry(VL, false, UserTreeIdx); DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); return; } @@ -1599,13 +1562,15 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth, LoadInst *L = cast(VL[i]); if (!L->isSimple()) { BS.cancelScheduling(VL, VL0); - newTreeEntry(VL, false, UserTreeIdx, S); + newTreeEntry(VL, false, UserTreeIdx); DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); return; } } // Check if the loads are consecutive, reversed, or neither. + // TODO: What we really want is to sort the loads, but for now, check + // the two likely directions. 
      bool Consecutive = true;
      bool ReverseConsecutive = true;
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
@@ -1619,7 +1584,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
 
      if (Consecutive) {
        ++NumLoadsWantToKeepOrder;
-        newTreeEntry(VL, true, UserTreeIdx, S);
+        newTreeEntry(VL, true, UserTreeIdx);
        DEBUG(dbgs() << "SLP: added a vector of loads.\n");
        return;
      }
@@ -1633,41 +1598,15 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
          break;
        }
 
+      BS.cancelScheduling(VL, VL0);
+      newTreeEntry(VL, false, UserTreeIdx);
+
      if (ReverseConsecutive) {
-        DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
        ++NumLoadsWantToChangeOrder;
-        BS.cancelScheduling(VL, VL0);
-        newTreeEntry(VL, false, UserTreeIdx, S);
-        return;
-      }
-
-      if (VL.size() > 2) {
-        bool ShuffledLoads = true;
-        SmallVector<Value *, 8> Sorted;
-        SmallVector<unsigned, 4> Mask;
-        if (sortLoadAccesses(VL, *DL, *SE, Sorted, &Mask)) {
-          auto NewVL = makeArrayRef(Sorted.begin(), Sorted.end());
-          for (unsigned i = 0, e = NewVL.size() - 1; i < e; ++i) {
-            if (!isConsecutiveAccess(NewVL[i], NewVL[i + 1], *DL, *SE)) {
-              ShuffledLoads = false;
-              break;
-            }
-          }
-          // TODO: Tracking how many load wants to have arbitrary shuffled order
-          // would be useful.
-          if (ShuffledLoads) {
-            DEBUG(dbgs() << "SLP: added a vector of loads which needs "
                            "permutation of loaded lanes.\n");
-            newTreeEntry(NewVL, true, UserTreeIdx, S,
-                         makeArrayRef(Mask.begin(), Mask.end()), OpdNum);
-            return;
-          }
-        }
+        DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
+      } else {
+        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
      }
-
-      DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
-      BS.cancelScheduling(VL, VL0);
-      newTreeEntry(VL, false, UserTreeIdx, S);
      return;
    }
    case Instruction::ZExt:
@@ -1687,12 +1626,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
        if (Ty != SrcTy || !isValidElementType(Ty)) {
          BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx, S);
+          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
          return;
        }
      }
-      newTreeEntry(VL, true, UserTreeIdx, S);
+      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of casts.\n");
 
      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
@@ -1701,7 +1640,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
@@ -1715,13 +1654,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        if (Cmp->getPredicate() != P0 ||
            Cmp->getOperand(0)->getType() != ComparedTy) {
          BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx, S);
+          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
          return;
        }
      }
 
-      newTreeEntry(VL, true, UserTreeIdx, S);
+      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of compares.\n");
 
      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
@@ -1730,7 +1669,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
@@ -1753,7 +1692,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
-      newTreeEntry(VL, true, UserTreeIdx, S);
+      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
 
      // Sort operands of the instructions so that each side is more likely to
@@ -1762,7 +1701,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        ValueList Left, Right;
        reorderInputsAccordingToOpcode(S.Opcode, VL, Left, Right);
        buildTree_rec(Left, Depth + 1, UserTreeIdx);
-        buildTree_rec(Right, Depth + 1, UserTreeIdx, 1);
+        buildTree_rec(Right, Depth + 1, UserTreeIdx);
        return;
      }
 
@@ -1772,7 +1711,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
 
@@ -1782,7 +1721,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
          DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
          BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx, S);
+          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }
@@ -1795,7 +1734,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        if (Ty0 != CurTy) {
          DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
          BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx, S);
+          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }
@@ -1807,12 +1746,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
          DEBUG(
              dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
          BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx, S);
+          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }
 
-      newTreeEntry(VL, true, UserTreeIdx, S);
+      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
      for (unsigned i = 0, e = 2; i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
@@ -1829,12 +1768,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
          BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx, S);
+          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
          return;
        }
 
-      newTreeEntry(VL, true, UserTreeIdx, S);
+      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of stores.\n");
 
      ValueList Operands;
@@ -1852,7 +1791,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (!isTriviallyVectorizable(ID)) {
        BS.cancelScheduling(VL, VL0);
-        newTreeEntry(VL, false, UserTreeIdx, S);
+        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
        return;
      }
@@ -1866,7 +1805,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
            getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
            !CI->hasIdenticalOperandBundleSchema(*CI2)) {
          BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx, S);
+          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                       << "\n");
          return;
@@ -1877,7 +1816,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
          Value *A1J = CI2->getArgOperand(1);
          if (A1I != A1J) {
            BS.cancelScheduling(VL, VL0);
-            newTreeEntry(VL, false, UserTreeIdx, S);
+            newTreeEntry(VL, false, UserTreeIdx);
            DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                         << " argument "<< A1I<<"!=" << A1J
                         << "\n");
            return;
          }
        }
@@ -1890,14 +1829,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
          BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx, S);
+          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI
                       << "!=" << *VL[i] << '\n');
          return;
        }
      }
 
-      newTreeEntry(VL, true, UserTreeIdx, S);
+      newTreeEntry(VL, true, UserTreeIdx);
      for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL) {
          CallInst *CI2 = dyn_cast<CallInst>(j);
          Operands.push_back(CI2->getArgOperand(i));
        }
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
@@ -1914,11 +1853,11 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
      // then do not vectorize this instruction.
      if (!S.IsAltShuffle) {
        BS.cancelScheduling(VL, VL0);
-        newTreeEntry(VL, false, UserTreeIdx, S);
+        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
        return;
      }
-      newTreeEntry(VL, true, UserTreeIdx, S);
+      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
 
      // Reorder operands if reordering would enable vectorization.
@@ -1926,7 +1865,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        ValueList Left, Right;
        reorderAltShuffleOperands(S.Opcode, VL, Left, Right);
        buildTree_rec(Left, Depth + 1, UserTreeIdx);
-        buildTree_rec(Right, Depth + 1, UserTreeIdx, 1);
+        buildTree_rec(Right, Depth + 1, UserTreeIdx);
        return;
      }
 
@@ -1936,13 +1875,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
 
    default:
      BS.cancelScheduling(VL, VL0);
-      newTreeEntry(VL, false, UserTreeIdx, S);
+      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
      return;
    }
@@ -2780,15 +2719,12 @@ Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const {
  return nullptr;
 }
 
-Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, int OpdNum, int UserIndx) {
+Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  InstructionsState S = getSameOpcode(VL);
  if (S.Opcode) {
    if (TreeEntry *E = getTreeEntry(S.OpValue)) {
-      TreeEntry *UserTreeEntry = &VectorizableTree[UserIndx];
-      if (E->isSame(VL) ||
-          (UserTreeEntry && !UserTreeEntry->ShuffleMask[OpdNum].empty() &&
-           E->isFoundJumbled(VL, *DL, *SE)))
-        return vectorizeTree(E, OpdNum, UserIndx);
+      if (E->isSame(VL))
+        return vectorizeTree(E);
    }
  }
 
@@ -2800,10 +2736,9 @@ Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, int OpdNum, int UserIndx) {
  return Gather(VL, VecTy);
 }
 
-Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
+Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);
 
-  TreeEntry *UserTreeEntry = nullptr;
  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
@@ -2823,10 +2758,6 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
    return V;
  }
 
-  assert(ScalarToTreeEntry.count(E->Scalars[0]) &&
-         "Expected user tree entry, missing!");
-  int CurrIndx = ScalarToTreeEntry[E->Scalars[0]];
-
  unsigned ShuffleOrOp = S.IsAltShuffle ?
          (unsigned) Instruction::ShuffleVector : S.Opcode;
  switch (ShuffleOrOp) {
@@ -2856,7 +2787,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
        Builder.SetInsertPoint(IBB->getTerminator());
        Builder.SetCurrentDebugLocation(PH->getDebugLoc());
 
-        Value *Vec = vectorizeTree(Operands, i, CurrIndx);
+        Value *Vec = vectorizeTree(Operands);
        NewPhi->addIncoming(Vec, IBB);
      }
 
@@ -2909,7 +2840,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
 
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *InVec = vectorizeTree(INVL, 0, CurrIndx);
+      Value *InVec = vectorizeTree(INVL);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
@@ -2930,8 +2861,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
 
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *L = vectorizeTree(LHSV, 0, CurrIndx);
-      Value *R = vectorizeTree(RHSV, 1, CurrIndx);
+      Value *L = vectorizeTree(LHSV);
+      Value *R = vectorizeTree(RHSV);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
@@ -2958,9 +2889,9 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
 
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *Cond = vectorizeTree(CondVec, 0, CurrIndx);
-      Value *True = vectorizeTree(TrueVec, 1, CurrIndx);
-      Value *False = vectorizeTree(FalseVec, 2, CurrIndx);
+      Value *Cond = vectorizeTree(CondVec);
+      Value *True = vectorizeTree(TrueVec);
+      Value *False = vectorizeTree(FalseVec);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
@@ -3001,8 +2932,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
 
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *LHS = vectorizeTree(LHSVL, 0, CurrIndx);
-      Value *RHS = vectorizeTree(RHSVL, 1, CurrIndx);
+      Value *LHS = vectorizeTree(LHSVL);
+      Value *RHS = vectorizeTree(RHSVL);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
@@ -3023,17 +2954,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
      // sink them all the way down past store instructions.
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      if(UserIndx != -1) {
-        UserTreeEntry = &VectorizableTree[UserIndx];
-      }
-
-      LoadInst *LI = NULL;
-      if (UserTreeEntry && !UserTreeEntry->ShuffleMask[OpdNum].empty()) {
-        LI = cast<LoadInst>(E->Scalars[0]);
-      } else {
-        LI = cast<LoadInst>(VL0);
-      }
-
+      LoadInst *LI = cast<LoadInst>(VL0);
      Type *ScalarLoadTy = LI->getType();
      unsigned AS = LI->getPointerAddressSpace();
 
@@ -3055,24 +2976,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
      LI->setAlignment(Alignment);
      E->VectorizedValue = LI;
      ++NumVectorInstructions;
-      propagateMetadata(LI, E->Scalars);
-
-      if (UserTreeEntry && !UserTreeEntry->ShuffleMask[OpdNum].empty()) {
-        SmallVector<Constant *, 8> Mask;
-        for (unsigned Lane = 0, LE = UserTreeEntry->ShuffleMask[OpdNum].size();
-             Lane != LE; ++Lane) {
-          Mask.push_back(
-              Builder.getInt32(UserTreeEntry->ShuffleMask[OpdNum][Lane]));
-        }
-        // Generate shuffle for jumbled memory access
-        Value *Undef = UndefValue::get(VecTy);
-        Value *Shuf = Builder.CreateShuffleVector((Value *)LI, Undef,
-                                                  ConstantVector::get(Mask));
-        E->VectorizedValue = Shuf;
-        ++NumVectorInstructions;
-        return Shuf;
-      }
-      return LI;
+      return propagateMetadata(LI, E->Scalars);
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(VL0);
@@ -3085,7 +2989,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
 
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *VecValue = vectorizeTree(ScalarStoreValues, 0, CurrIndx);
+      Value *VecValue = vectorizeTree(ScalarStoreValues);
      Value *ScalarPtr = SI->getPointerOperand();
      Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS));
      StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
@@ -3111,7 +3015,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
      for (Value *V : E->Scalars)
        Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));
 
-      Value *Op0 = vectorizeTree(Op0VL, 0, CurrIndx);
+      Value *Op0 = vectorizeTree(Op0VL);
 
      std::vector<Value *> OpVecs;
      for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
           ++j) {
        ValueList OpVL;
        for (Value *V : E->Scalars)
          OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));
 
-        Value *OpVec = vectorizeTree(OpVL, j, CurrIndx);
+        Value *OpVec = vectorizeTree(OpVL);
        OpVecs.push_back(OpVec);
      }
 
@@ -3159,7 +3063,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
          OpVL.push_back(CEI->getArgOperand(j));
        }
 
-        Value *OpVec = vectorizeTree(OpVL, j, CurrIndx);
+        Value *OpVec = vectorizeTree(OpVL);
        DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
        OpVecs.push_back(OpVec);
      }
@@ -3190,8 +3094,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
      reorderAltShuffleOperands(S.Opcode, E->Scalars, LHSVL, RHSVL);
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *LHS = vectorizeTree(LHSVL, 0, CurrIndx);
-      Value *RHS = vectorizeTree(RHSVL, 1, CurrIndx);
+      Value *LHS = vectorizeTree(LHSVL);
+      Value *RHS = vectorizeTree(RHSVL);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
@@ -3291,14 +3195,9 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
      continue;
    TreeEntry *E = getTreeEntry(Scalar);
    assert(E && "Invalid scalar");
-    assert((!E->NeedToGather) && "Extracting from a gather list");
+    assert(!E->NeedToGather && "Extracting from a gather list");
 
-    Value *Vec = dyn_cast<ShuffleVectorInst>(E->VectorizedValue);
-    if (Vec && dyn_cast<LoadInst>(cast<Instruction>(Vec)->getOperand(0))) {
-      Vec = cast<Instruction>(E->VectorizedValue)->getOperand(0);
-    } else {
-      Vec = E->VectorizedValue;
-    }
+    Value *Vec = E->VectorizedValue;
assert(Vec && "Can't find vectorizable value"); Value *Lane = Builder.getInt32(ExternalUse.Lane); diff --git a/test/Transforms/SLPVectorizer/X86/jumbled-load-multiuse.ll b/test/Transforms/SLPVectorizer/X86/jumbled-load-multiuse.ll index 4def8ce561c..557a83a7562 100644 --- a/test/Transforms/SLPVectorizer/X86/jumbled-load-multiuse.ll +++ b/test/Transforms/SLPVectorizer/X86/jumbled-load-multiuse.ll @@ -11,16 +11,20 @@ define i32 @fn1() { ; CHECK-LABEL: @fn1( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @b to <4 x i32>*), align 4 -; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[TMP1]], zeroinitializer -; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1 -; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP3]], i32 0 -; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 1 -; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 2 -; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 8, i32 3 -; CHECK-NEXT: [[TMP8:%.*]] = select <4 x i1> [[TMP2]], <4 x i32> [[TMP7]], <4 x i32> -; CHECK-NEXT: store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 0), align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 1), align 4 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 2), align 4 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 3), align 4 +; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[TMP2]], i32 1 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP3]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP0]], i32 3 +; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt <4 x i32> [[TMP7]], zeroinitializer +; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> [[TMP4]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 1 +; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 2 +; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 8, i32 3 +; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP8]], <4 x i32> [[TMP11]], <4 x i32> +; CHECK-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4 ; CHECK-NEXT: ret i32 0 ; entry: diff --git a/test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll b/test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll deleted file mode 100644 index cddb6d7076d..00000000000 --- a/test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll +++ /dev/null @@ -1,68 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s - - -;void jumble (int * restrict A, int * restrict B) { - ; int tmp0 = A[10]*A[0]; - ; int tmp1 = A[11]*A[1]; - ; int tmp2 = A[12]*A[3]; - ; int tmp3 = A[13]*A[2]; - ; B[0] = tmp0; - ; B[1] = tmp1; - ; B[2] = tmp2; - ; B[3] = tmp3; - ;} - ; Function Attrs: norecurse nounwind uwtable - define void 
@jumble(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) { -; CHECK-LABEL: @jumble( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 10 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 11 -; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 1 -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 12 -; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3 -; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 13 -; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[ARRAYIDX]] to <4 x i32>* -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4 -; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2 -; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[A]] to <4 x i32>* -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4 -; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> undef, <4 x i32> -; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <4 x i32> [[TMP4]], [[TMP1]] -; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1 -; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2 -; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3 -; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[B]] to <4 x i32>* -; CHECK-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4 -; CHECK-NEXT: ret void -; -entry: - %arrayidx = getelementptr inbounds i32, i32* %A, i64 10 - %0 = load i32, i32* %arrayidx, align 4 - %1 = load i32, i32* %A, align 4 - %mul = mul nsw i32 %1, %0 - %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 11 - %2 = load i32, i32* %arrayidx2, align 4 - %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1 - %3 = load i32, i32* %arrayidx3, align 4 - %mul4 = mul nsw i32 %3, %2 - %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 12 - %4 = load i32, i32* %arrayidx5, align 4 - %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 3 - %5 = load i32, i32* %arrayidx6, align 4 - %mul7 = mul nsw i32 %5, %4 - %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 13 - %6 = load i32, i32* %arrayidx8, align 4 - %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 2 - %7 = load i32, i32* %arrayidx9, align 4 - %mul10 = mul nsw i32 %7, %6 - store i32 %mul, i32* %B, align 4 - %arrayidx12 = getelementptr inbounds i32, i32* %B, i64 1 - store i32 %mul4, i32* %arrayidx12, align 4 - %arrayidx13 = getelementptr inbounds i32, i32* %B, i64 2 - store i32 %mul7, i32* %arrayidx13, align 4 - %arrayidx14 = getelementptr inbounds i32, i32* %B, i64 3 - store i32 %mul10, i32* %arrayidx14, align 4 - ret void - } - diff --git a/test/Transforms/SLPVectorizer/X86/jumbled-load.ll b/test/Transforms/SLPVectorizer/X86/jumbled-load.ll index be58521ed89..06e051a90b0 100644 --- a/test/Transforms/SLPVectorizer/X86/jumbled-load.ll +++ b/test/Transforms/SLPVectorizer/X86/jumbled-load.ll @@ -5,27 +5,34 @@ define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) { ; CHECK-LABEL: @jumbled-load( -; CHECK-NEXT: [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0 +; CHECK-NEXT: [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* %in, i64 0 +; CHECK-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4 ; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3 +; CHECK-NEXT: [[LOAD_2:%.*]] = load i32, i32* 
[[GEP_1]], align 4 ; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1 +; CHECK-NEXT: [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4 ; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2 -; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>* -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4 -; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> -; CHECK-NEXT: [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0 +; CHECK-NEXT: [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4 +; CHECK-NEXT: [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* %inn, i64 0 +; CHECK-NEXT: [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4 ; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2 +; CHECK-NEXT: [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4 ; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3 +; CHECK-NEXT: [[LOAD_7:%.*]] = load i32, i32* [[GEP_5]], align 4 ; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1 -; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[INN_ADDR]] to <4 x i32>* -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <4 x i32> -; CHECK-NEXT: [[TMP7:%.*]] = mul <4 x i32> [[TMP3]], [[TMP6]] -; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0 -; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1 -; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2 -; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3 -; CHECK-NEXT: [[TMP8:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>* -; CHECK-NEXT: store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4 +; CHECK-NEXT: [[LOAD_8:%.*]] = load i32, i32* [[GEP_6]], align 4 +; CHECK-NEXT: [[MUL_1:%.*]] = mul i32 [[LOAD_3]], [[LOAD_5]] +; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_8]] +; CHECK-NEXT: [[MUL_3:%.*]] = mul i32 [[LOAD_4]], [[LOAD_7]] +; CHECK-NEXT: [[MUL_4:%.*]] = mul i32 [[LOAD_1]], [[LOAD_6]] +; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds i32, i32* %out, i64 0 +; CHECK-NEXT: store i32 [[MUL_1]], i32* [[GEP_7]], align 4 +; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i32, i32* %out, i64 1 +; CHECK-NEXT: store i32 [[MUL_2]], i32* [[GEP_8]], align 4 +; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i32, i32* %out, i64 2 +; CHECK-NEXT: store i32 [[MUL_3]], i32* [[GEP_9]], align 4 +; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i32, i32* %out, i64 3 +; CHECK-NEXT: store i32 [[MUL_4]], i32* [[GEP_10]], align 4 ; CHECK-NEXT: ret i32 undef ; %in.addr = getelementptr inbounds i32, i32* %in, i64 0 diff --git a/test/Transforms/SLPVectorizer/X86/store-jumbled.ll b/test/Transforms/SLPVectorizer/X86/store-jumbled.ll index 6ae76352001..1b2c76384e0 100644 --- a/test/Transforms/SLPVectorizer/X86/store-jumbled.ll +++ b/test/Transforms/SLPVectorizer/X86/store-jumbled.ll @@ -6,26 +6,33 @@ define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) { ; CHECK-LABEL: @jumbled-load( ; CHECK-NEXT: [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0 +; CHECK-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4 ; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1 +; CHECK-NEXT: 
[[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4 ; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2 +; CHECK-NEXT: [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4 ; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3 -; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>* -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4 -; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> +; CHECK-NEXT: [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4 ; CHECK-NEXT: [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0 +; CHECK-NEXT: [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4 ; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1 +; CHECK-NEXT: [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4 ; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2 +; CHECK-NEXT: [[LOAD_7:%.*]] = load i32, i32* [[GEP_5]], align 4 ; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3 -; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[INN_ADDR]] to <4 x i32>* -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <4 x i32> -; CHECK-NEXT: [[TMP7:%.*]] = mul <4 x i32> [[TMP3]], [[TMP6]] +; CHECK-NEXT: [[LOAD_8:%.*]] = load i32, i32* [[GEP_6]], align 4 +; CHECK-NEXT: [[MUL_1:%.*]] = mul i32 [[LOAD_1]], [[LOAD_5]] +; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_6]] +; CHECK-NEXT: [[MUL_3:%.*]] = mul i32 [[LOAD_3]], [[LOAD_7]] +; CHECK-NEXT: [[MUL_4:%.*]] = mul i32 [[LOAD_4]], [[LOAD_8]] ; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0 ; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1 ; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2 ; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3 -; CHECK-NEXT: [[TMP8:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>* -; CHECK-NEXT: store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4 +; CHECK-NEXT: store i32 [[MUL_1]], i32* [[GEP_9]], align 4 +; CHECK-NEXT: store i32 [[MUL_2]], i32* [[GEP_7]], align 4 +; CHECK-NEXT: store i32 [[MUL_3]], i32* [[GEP_10]], align 4 +; CHECK-NEXT: store i32 [[MUL_4]], i32* [[GEP_8]], align 4 ; CHECK-NEXT: ret i32 undef ; %in.addr = getelementptr inbounds i32, i32* %in, i64 0 -- 2.11.0
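
Editor's illustration of the reverted technique (not part of the patch): the core
of D36130 is deriving, from a set of loads given in program order, both the
memory-sorted load order and the shuffle mask that restores program order after
a single vector load. The sketch below is a minimal standalone C++ rendering of
that index computation, assuming plain integer offsets in place of LLVM's
SCEV-derived pointer differences; sortAndMask is a hypothetical helper, not an
LLVM API.

    #include <algorithm>
    #include <cstdint>
    #include <numeric>
    #include <utility>
    #include <vector>

    // Given per-lane memory offsets of loads in program order, compute:
    //  - UseOrder: lane indices sorted by offset (the order to load memory in),
    //  - Mask: shuffle mask that permutes the memory-order vector back into
    //    program order, i.e. Result[i] = Loaded[Mask[i]].
    // For offsets {2,0,1,3} this yields Mask {2,0,1,3}, matching the <2,0,1,3>
    // example in the sortLoadAccesses documentation removed above.
    static std::pair<std::vector<unsigned>, std::vector<unsigned>>
    sortAndMask(const std::vector<int64_t> &Offsets) {
      std::vector<unsigned> UseOrder(Offsets.size());
      std::iota(UseOrder.begin(), UseOrder.end(), 0u);
      // Sort the accesses by offset, remembering each lane's original position.
      std::sort(UseOrder.begin(), UseOrder.end(),
                [&](unsigned L, unsigned R) { return Offsets[L] < Offsets[R]; });
      // Invert the permutation to obtain the use-side shuffle mask, mirroring
      // the second std::sort in the removed sortLoadAccesses.
      std::vector<unsigned> Mask(Offsets.size());
      std::iota(Mask.begin(), Mask.end(), 0u);
      std::sort(Mask.begin(), Mask.end(),
                [&](unsigned L, unsigned R) { return UseOrder[L] < UseOrder[R]; });
      return {UseOrder, Mask};
    }

The second sort computes the inverse permutation of UseOrder: it is what lets
the vectorizer issue one consecutive vector load in memory order and then a
single shufflevector to hand each user lane its original program-order value.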