1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
/// This transformation implements the well-known scalar replacement of
11 /// aggregates transformation. It tries to identify promotable elements of an
12 /// aggregate alloca, and promote them to registers. It will also try to
13 /// convert uses of an element (or set of elements) of an alloca into a vector
14 /// or bitfield-style integer scalar if appropriate.
16 /// It works to do this with minimal slicing of the alloca so that regions
17 /// which are merely transferred in and out of external memory remain unchanged
18 /// and are not decomposed to scalar code.
20 /// Because this also performs alloca promotion, it can be thought of as also
21 /// serving the purpose of SSA formation. The algorithm iterates on the
22 /// function until all opportunities for promotion have been realized.
24 //===----------------------------------------------------------------------===//
26 #include "llvm/Transforms/Scalar/SROA.h"
27 #include "llvm/ADT/APInt.h"
28 #include "llvm/ADT/ArrayRef.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/PointerIntPair.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/ADT/SetVector.h"
33 #include "llvm/ADT/SmallBitVector.h"
34 #include "llvm/ADT/SmallPtrSet.h"
35 #include "llvm/ADT/SmallVector.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/StringRef.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/ADT/iterator.h"
40 #include "llvm/ADT/iterator_range.h"
41 #include "llvm/Analysis/AssumptionCache.h"
42 #include "llvm/Analysis/GlobalsModRef.h"
43 #include "llvm/Analysis/Loads.h"
44 #include "llvm/Analysis/PtrUseVisitor.h"
45 #include "llvm/Analysis/Utils/Local.h"
46 #include "llvm/IR/BasicBlock.h"
47 #include "llvm/IR/Constant.h"
48 #include "llvm/IR/ConstantFolder.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DIBuilder.h"
51 #include "llvm/IR/DataLayout.h"
52 #include "llvm/IR/DebugInfoMetadata.h"
53 #include "llvm/IR/DerivedTypes.h"
54 #include "llvm/IR/Dominators.h"
55 #include "llvm/IR/Function.h"
56 #include "llvm/IR/GetElementPtrTypeIterator.h"
57 #include "llvm/IR/GlobalAlias.h"
58 #include "llvm/IR/IRBuilder.h"
59 #include "llvm/IR/InstVisitor.h"
60 #include "llvm/IR/InstrTypes.h"
61 #include "llvm/IR/Instruction.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/LLVMContext.h"
66 #include "llvm/IR/Metadata.h"
67 #include "llvm/IR/Module.h"
68 #include "llvm/IR/Operator.h"
69 #include "llvm/IR/PassManager.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/Use.h"
72 #include "llvm/IR/User.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/Pass.h"
75 #include "llvm/Support/Casting.h"
76 #include "llvm/Support/CommandLine.h"
77 #include "llvm/Support/Compiler.h"
78 #include "llvm/Support/Debug.h"
79 #include "llvm/Support/ErrorHandling.h"
80 #include "llvm/Support/MathExtras.h"
81 #include "llvm/Support/raw_ostream.h"
82 #include "llvm/Transforms/Scalar.h"
83 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstdint>
#include <utility>

// We only use this for a debug check.
#include <random>
101 using namespace llvm;
102 using namespace llvm::sroa;
104 #define DEBUG_TYPE "sroa"
106 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
107 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
108 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
109 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
110 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
111 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
112 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
113 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
114 STATISTIC(NumDeleted, "Number of instructions deleted");
115 STATISTIC(NumVectorized, "Number of vectorized aggregates");
117 /// Hidden option to enable randomly shuffling the slices to help uncover
118 /// instability in their order.
119 static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
120 cl::init(false), cl::Hidden);
/// Hidden option to experiment with completely strict handling of inbounds
/// GEPs.
static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
                                        cl::Hidden);
namespace {
/// \brief A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter {
  std::string Prefix;
  const Twine getNameWithPrefix(const Twine &Name) const {
    return Name.isTriviallyEmpty() ? Name : Prefix + Name;
  }
public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }
protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
                                           InsertPt);
  }
};

/// \brief Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;
152 /// \brief A used slice of an alloca.
154 /// This structure represents a slice of an alloca used by some instruction. It
155 /// stores both the begin and end offsets of this use, a pointer to the use
156 /// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
struct Slice {
159 /// \brief The beginning offset of the range.
160 uint64_t BeginOffset = 0;
162 /// \brief The ending offset, not included in the range.
163 uint64_t EndOffset = 0;
165 /// \brief Storage for both the use of this slice and whether it can be
167 PointerIntPair<Use *, 1, bool> UseAndIsSplittable;
172 Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
173 : BeginOffset(BeginOffset), EndOffset(EndOffset),
174 UseAndIsSplittable(U, IsSplittable) {}
176 uint64_t beginOffset() const { return BeginOffset; }
177 uint64_t endOffset() const { return EndOffset; }
179 bool isSplittable() const { return UseAndIsSplittable.getInt(); }
180 void makeUnsplittable() { UseAndIsSplittable.setInt(false); }
182 Use *getUse() const { return UseAndIsSplittable.getPointer(); }
184 bool isDead() const { return getUse() == nullptr; }
185 void kill() { UseAndIsSplittable.setPointer(nullptr); }
187 /// \brief Support for ordering ranges.
189 /// This provides an ordering over ranges such that start offsets are
190 /// always increasing, and within equal start offsets, the end offsets are
191 /// decreasing. Thus the spanning range comes first in a cluster with the
192 /// same start position.
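  /// For example, with equal begin offsets an unsplittable slice orders before
  /// a splittable one, and a slice ending at offset 16 orders before one
  /// ending at offset 8 when both are equally splittable.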
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset())
      return true;
    if (beginOffset() > RHS.beginOffset())
      return false;
    if (isSplittable() != RHS.isSplittable())
      return !isSplittable();
    if (endOffset() > RHS.endOffset())
      return true;
    return false;
  }
  /// \brief Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }
  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};

} // end anonymous namespace
namespace llvm {

template <typename T> struct isPodLike;
template <> struct isPodLike<Slice> { static const bool value = true; };

} // end namespace llvm
231 /// \brief Representation of the alloca slices.
233 /// This class represents the slices of an alloca which are formed by its
234 /// various uses. If a pointer escapes, we can't fully build a representation
235 /// for the slices used and we reflect that in this structure. The uses are
236 /// stored, sorted by increasing beginning offset and with unsplittable slices
237 /// starting at a particular offset before splittable slices.
class llvm::sroa::AllocaSlices {
public:
240 /// \brief Construct the slices of a particular alloca.
241 AllocaSlices(const DataLayout &DL, AllocaInst &AI);
243 /// \brief Test whether a pointer to the allocation escapes our analysis.
245 /// If this is true, the slices are never fully built and should be
247 bool isEscaped() const { return PointerEscapingInstr; }
249 /// \brief Support for iterating over the slices.
251 using iterator = SmallVectorImpl<Slice>::iterator;
252 using range = iterator_range<iterator>;
254 iterator begin() { return Slices.begin(); }
255 iterator end() { return Slices.end(); }
257 using const_iterator = SmallVectorImpl<Slice>::const_iterator;
258 using const_range = iterator_range<const_iterator>;
260 const_iterator begin() const { return Slices.begin(); }
261 const_iterator end() const { return Slices.end(); }
264 /// \brief Erase a range of slices.
265 void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }
267 /// \brief Insert new slices for this alloca.
269 /// This moves the slices into the alloca's slices collection, and re-sorts
270 /// everything so that the usual ordering properties of the alloca's slices
272 void insert(ArrayRef<Slice> NewSlices) {
273 int OldSize = Slices.size();
274 Slices.append(NewSlices.begin(), NewSlices.end());
275 auto SliceI = Slices.begin() + OldSize;
276 std::sort(SliceI, Slices.end());
277 std::inplace_merge(Slices.begin(), SliceI, Slices.end());
280 // Forward declare the iterator and range accessor for walking the
282 class partition_iterator;
283 iterator_range<partition_iterator> partitions();
285 /// \brief Access the dead users for this alloca.
286 ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }
288 /// \brief Access the dead operands referring to this alloca.
  /// These are operands which cannot actually be used to refer to the
291 /// alloca as they are outside its range and the user doesn't correct for
292 /// that. These mostly consist of PHI node inputs and the like which we just
293 /// need to replace with undef.
294 ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }
296 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
297 void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
298 void printSlice(raw_ostream &OS, const_iterator I,
299 StringRef Indent = " ") const;
300 void printUse(raw_ostream &OS, const_iterator I,
301 StringRef Indent = " ") const;
302 void print(raw_ostream &OS) const;
303 void dump(const_iterator I) const;
308 template <typename DerivedT, typename RetT = void> class BuilderBase;
311 friend class AllocaSlices::SliceBuilder;
313 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
314 /// \brief Handle to alloca instruction to simplify method interfaces.
318 /// \brief The instruction responsible for this alloca not having a known set
321 /// When an instruction (potentially) escapes the pointer to the alloca, we
322 /// store a pointer to that here and abort trying to form slices of the
323 /// alloca. This will be null if the alloca slices are analyzed successfully.
324 Instruction *PointerEscapingInstr;
326 /// \brief The slices of the alloca.
328 /// We store a vector of the slices formed by uses of the alloca here. This
329 /// vector is sorted by increasing begin offset, and then the unsplittable
330 /// slices before the splittable ones. See the Slice inner class for more
332 SmallVector<Slice, 8> Slices;
334 /// \brief Instructions which will become dead if we rewrite the alloca.
336 /// Note that these are not separated by slice. This is because we expect an
337 /// alloca to be completely rewritten or not rewritten at all. If rewritten,
338 /// all these instructions can simply be removed and replaced with undef as
339 /// they come from outside of the allocated space.
340 SmallVector<Instruction *, 8> DeadUsers;
342 /// \brief Operands which will become dead if we rewrite the alloca.
344 /// These are operands that in their particular use can be replaced with
345 /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
346 /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
353 /// \brief A partition of the slices.
355 /// An ephemeral representation for a range of slices which can be viewed as
356 /// a partition of the alloca. This range represents a span of the alloca's
357 /// memory which cannot be split, and provides access to all of the slices
358 /// overlapping some part of the partition.
360 /// Objects of this type are produced by traversing the alloca's slices, but
361 /// are only ephemeral and not persistent.
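/// For example, given an unsplittable store covering [0, 4) and a splittable
/// memset covering [0, 8), the traversal produces the partition [0, 4)
/// containing both slices, followed by the partition [4, 8) containing only
/// the split tail of the memset.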
362 class llvm::sroa::Partition {
364 friend class AllocaSlices;
365 friend class AllocaSlices::partition_iterator;
367 using iterator = AllocaSlices::iterator;
369 /// \brief The beginning and ending offsets of the alloca for this
371 uint64_t BeginOffset, EndOffset;
373 /// \brief The start and end iterators of this partition.
376 /// \brief A collection of split slice tails overlapping the partition.
377 SmallVector<Slice *, 4> SplitTails;
379 /// \brief Raw constructor builds an empty partition starting and ending at
380 /// the given iterator.
381 Partition(iterator SI) : SI(SI), SJ(SI) {}
384 /// \brief The start offset of this partition.
386 /// All of the contained slices start at or after this offset.
387 uint64_t beginOffset() const { return BeginOffset; }
389 /// \brief The end offset of this partition.
391 /// All of the contained slices end at or before this offset.
392 uint64_t endOffset() const { return EndOffset; }
394 /// \brief The size of the partition.
396 /// Note that this can never be zero.
397 uint64_t size() const {
398 assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
399 return EndOffset - BeginOffset;
402 /// \brief Test whether this partition contains no slices, and merely spans
403 /// a region occupied by split slices.
404 bool empty() const { return SI == SJ; }
406 /// \name Iterate slices that start within the partition.
407 /// These may be splittable or unsplittable. They have a begin offset >= the
408 /// partition begin offset.
410 // FIXME: We should probably define a "concat_iterator" helper and use that
411 // to stitch together pointee_iterators over the split tails and the
412 // contiguous iterators of the partition. That would give a much nicer
413 // interface here. We could then additionally expose filtered iterators for
  // split, unsplit, and unsplittable slices based on the usage patterns.
415 iterator begin() const { return SI; }
416 iterator end() const { return SJ; }
419 /// \brief Get the sequence of split slice tails.
421 /// These tails are of slices which start before this partition but are
422 /// split and overlap into the partition. We accumulate these while forming
424 ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
427 /// \brief An iterator over partitions of the alloca's slices.
429 /// This iterator implements the core algorithm for partitioning the alloca's
430 /// slices. It is a forward iterator as we don't support backtracking for
431 /// efficiency reasons, and re-use a single storage area to maintain the
432 /// current set of split slices.
434 /// It is templated on the slice iterator type to use so that it can operate
435 /// with either const or non-const slice iterators.
436 class AllocaSlices::partition_iterator
437 : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
439 friend class AllocaSlices;
441 /// \brief Most of the state for walking the partitions is held in a class
442 /// with a nice interface for examining them.
445 /// \brief We need to keep the end of the slices to know when to stop.
446 AllocaSlices::iterator SE;
448 /// \brief We also need to keep track of the maximum split end offset seen.
449 /// FIXME: Do we really?
450 uint64_t MaxSplitSliceEndOffset = 0;
  /// \brief Sets the partition to be empty at the given iterator, and sets the
  /// end iterator.
454 partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
456 // If not already at the end, advance our state to form the initial
462 /// \brief Advance the iterator to the next partition.
464 /// Requires that the iterator not be at the end of the slices.
466 assert((P.SI != SE || !P.SplitTails.empty()) &&
467 "Cannot advance past the end of the slices!");
469 // Clear out any split uses which have ended.
470 if (!P.SplitTails.empty()) {
471 if (P.EndOffset >= MaxSplitSliceEndOffset) {
472 // If we've finished all splits, this is easy.
473 P.SplitTails.clear();
474 MaxSplitSliceEndOffset = 0;
476 // Remove the uses which have ended in the prior partition. This
477 // cannot change the max split slice end because we just checked that
478 // the prior partition ended prior to that max.
479 P.SplitTails.erase(llvm::remove_if(P.SplitTails,
481 return S->endOffset() <=
485 assert(llvm::any_of(P.SplitTails,
487 return S->endOffset() == MaxSplitSliceEndOffset;
489 "Could not find the current max split slice offset!");
490 assert(llvm::all_of(P.SplitTails,
492 return S->endOffset() <= MaxSplitSliceEndOffset;
494 "Max split slice end offset is not actually the max!");
498 // If P.SI is already at the end, then we've cleared the split tail and
499 // now have an end iterator.
501 assert(P.SplitTails.empty() && "Failed to clear the split slices!");
505 // If we had a non-empty partition previously, set up the state for
506 // subsequent partitions.
508 // Accumulate all the splittable slices which started in the old
509 // partition into the split list.
511 if (S.isSplittable() && S.endOffset() > P.EndOffset) {
512 P.SplitTails.push_back(&S);
513 MaxSplitSliceEndOffset =
514 std::max(S.endOffset(), MaxSplitSliceEndOffset);
517 // Start from the end of the previous partition.
520 // If P.SI is now at the end, we at most have a tail of split slices.
522 P.BeginOffset = P.EndOffset;
523 P.EndOffset = MaxSplitSliceEndOffset;
      // If we have split slices and the next slice is after a gap and is
      // not splittable, immediately form an empty partition for the split
529 // slices up until the next slice begins.
530 if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
531 !P.SI->isSplittable()) {
532 P.BeginOffset = P.EndOffset;
533 P.EndOffset = P.SI->beginOffset();
538 // OK, we need to consume new slices. Set the end offset based on the
539 // current slice, and step SJ past it. The beginning offset of the
540 // partition is the beginning offset of the next slice unless we have
541 // pre-existing split slices that are continuing, in which case we begin
542 // at the prior end offset.
543 P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
544 P.EndOffset = P.SI->endOffset();
547 // There are two strategies to form a partition based on whether the
548 // partition starts with an unsplittable slice or a splittable slice.
549 if (!P.SI->isSplittable()) {
550 // When we're forming an unsplittable region, it must always start at
551 // the first slice and will extend through its end.
552 assert(P.BeginOffset == P.SI->beginOffset());
554 // Form a partition including all of the overlapping slices with this
555 // unsplittable slice.
556 while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
557 if (!P.SJ->isSplittable())
558 P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
562 // We have a partition across a set of overlapping unsplittable
567 // If we're starting with a splittable slice, then we need to form
568 // a synthetic partition spanning it and any other overlapping splittable
570 assert(P.SI->isSplittable() && "Forming a splittable partition!");
572 // Collect all of the overlapping splittable slices.
573 while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
574 P.SJ->isSplittable()) {
575 P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
      // Back up P.EndOffset if we ended the span early when encountering an
      // unsplittable slice. This synthesizes the early end offset of
      // a partition spanning only splittable slices.
582 if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
583 assert(!P.SJ->isSplittable());
584 P.EndOffset = P.SJ->beginOffset();
589 bool operator==(const partition_iterator &RHS) const {
590 assert(SE == RHS.SE &&
591 "End iterators don't match between compared partition iterators!");
    // The observed positions of partitions are marked by the P.SI iterator and
594 // the emptiness of the split slices. The latter is only relevant when
595 // P.SI == SE, as the end iterator will additionally have an empty split
596 // slices list, but the prior may have the same P.SI and a tail of split
598 if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
599 assert(P.SJ == RHS.P.SJ &&
600 "Same set of slices formed two different sized partitions!");
601 assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
602 "Same slice position with differently sized non-empty split "
609 partition_iterator &operator++() {
614 Partition &operator*() { return P; }
617 /// \brief A forward range over the partitions of the alloca's slices.
619 /// This accesses an iterator range over the partitions of the alloca's
620 /// slices. It computes these partitions on the fly based on the overlapping
621 /// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split slices.
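///
/// A typical traversal (illustrative) simply iterates this range:
///   for (Partition &P : AS.partitions()) { ... }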
624 iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
625 return make_range(partition_iterator(begin(), end()),
626 partition_iterator(end(), end()));
629 static Value *foldSelectInst(SelectInst &SI) {
630 // If the condition being selected on is a constant or the same value is
631 // being selected between, fold the select. Yes this does (rarely) happen
633 if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
634 return SI.getOperand(1 + CI->isZero());
635 if (SI.getOperand(1) == SI.getOperand(2))
636 return SI.getOperand(1);
641 /// \brief A helper that folds a PHI node or a select.
642 static Value *foldPHINodeOrSelectInst(Instruction &I) {
643 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
644 // If PN merges together the same value, return that value.
645 return PN->hasConstantValue();
647 return foldSelectInst(cast<SelectInst>(I));
650 /// \brief Builder for the alloca slices.
652 /// This class builds a set of alloca slices by recursively visiting the uses
653 /// of an alloca and making a slice for each load and store at each offset.
654 class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
655 friend class PtrUseVisitor<SliceBuilder>;
656 friend class InstVisitor<SliceBuilder>;
658 using Base = PtrUseVisitor<SliceBuilder>;
660 const uint64_t AllocSize;
663 SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
664 SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
666 /// \brief Set to de-duplicate dead instructions found in the use walk.
667 SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
670 SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
671 : PtrUseVisitor<SliceBuilder>(DL),
672 AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}
675 void markAsDead(Instruction &I) {
676 if (VisitedDeadInsts.insert(&I).second)
677 AS.DeadUsers.push_back(&I);
680 void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
681 bool IsSplittable = false) {
682 // Completely skip uses which have a zero size or start either before or
683 // past the end of the allocation.
684 if (Size == 0 || Offset.uge(AllocSize)) {
685 DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
686 << " which has zero size or starts outside of the "
687 << AllocSize << " byte alloca:\n"
688 << " alloca: " << AS.AI << "\n"
689 << " use: " << I << "\n");
690 return markAsDead(I);
693 uint64_t BeginOffset = Offset.getZExtValue();
694 uint64_t EndOffset = BeginOffset + Size;
696 // Clamp the end offset to the end of the allocation. Note that this is
697 // formulated to handle even the case where "BeginOffset + Size" overflows.
698 // This may appear superficially to be something we could ignore entirely,
699 // but that is not so! There may be widened loads or PHI-node uses where
700 // some instructions are dead but not others. We can't completely ignore
701 // them, and so have to record at least the information here.
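    // For example, an 8 byte use at offset 4 of an 8 byte alloca is recorded
    // as the clamped slice [4, 8) rather than dropped entirely.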
702 assert(AllocSize >= BeginOffset); // Established above.
703 if (Size > AllocSize - BeginOffset) {
704 DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
705 << " to remain within the " << AllocSize << " byte alloca:\n"
706 << " alloca: " << AS.AI << "\n"
707 << " use: " << I << "\n");
708 EndOffset = AllocSize;
711 AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
714 void visitBitCastInst(BitCastInst &BC) {
716 return markAsDead(BC);
718 return Base::visitBitCastInst(BC);
721 void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
722 if (GEPI.use_empty())
723 return markAsDead(GEPI);
725 if (SROAStrictInbounds && GEPI.isInBounds()) {
726 // FIXME: This is a manually un-factored variant of the basic code inside
727 // of GEPs with checking of the inbounds invariant specified in the
728 // langref in a very strict sense. If we ever want to enable
729 // SROAStrictInbounds, this code should be factored cleanly into
730 // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
731 // by writing out the code here where we have the underlying allocation
732 // size readily available.
733 APInt GEPOffset = Offset;
734 const DataLayout &DL = GEPI.getModule()->getDataLayout();
735 for (gep_type_iterator GTI = gep_type_begin(GEPI),
736 GTE = gep_type_end(GEPI);
738 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
742 // Handle a struct index, which adds its field offset to the pointer.
743 if (StructType *STy = GTI.getStructTypeOrNull()) {
744 unsigned ElementIdx = OpC->getZExtValue();
745 const StructLayout *SL = DL.getStructLayout(STy);
747 APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
749 // For array or vector indices, scale the index by the size of the
751 APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
752 GEPOffset += Index * APInt(Offset.getBitWidth(),
753 DL.getTypeAllocSize(GTI.getIndexedType()));
756 // If this index has computed an intermediate pointer which is not
757 // inbounds, then the result of the GEP is a poison value and we can
758 // delete it and all uses.
759 if (GEPOffset.ugt(AllocSize))
760 return markAsDead(GEPI);
764 return Base::visitGetElementPtrInst(GEPI);
767 void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
768 uint64_t Size, bool IsVolatile) {
769 // We allow splitting of non-volatile loads and stores where the type is an
770 // integer type. These may be used to implement 'memcpy' or other "transfer
771 // of bits" patterns.
772 bool IsSplittable = Ty->isIntegerTy() && !IsVolatile;
774 insertUse(I, Offset, Size, IsSplittable);
777 void visitLoadInst(LoadInst &LI) {
778 assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
779 "All simple FCA loads should have been pre-split");
    if (!IsOffsetKnown)
      return PI.setAborted(&LI);
784 const DataLayout &DL = LI.getModule()->getDataLayout();
785 uint64_t Size = DL.getTypeStoreSize(LI.getType());
786 return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
789 void visitStoreInst(StoreInst &SI) {
790 Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);
796 const DataLayout &DL = SI.getModule()->getDataLayout();
797 uint64_t Size = DL.getTypeStoreSize(ValOp->getType());
    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
804 // FIXME: We should instead consider the pointer to have escaped if this
805 // function is being instrumented for addressing bugs or race conditions.
806 if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
807 DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
808 << " which extends past the end of the " << AllocSize
810 << " alloca: " << AS.AI << "\n"
811 << " use: " << SI << "\n");
812 return markAsDead(SI);
815 assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
816 "All simple FCA stores should have been pre-split");
817 handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
820 void visitMemSetInst(MemSetInst &II) {
821 assert(II.getRawDest() == *U && "Pointer use is not the destination?");
822 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
823 if ((Length && Length->getValue() == 0) ||
824 (IsOffsetKnown && Offset.uge(AllocSize)))
825 // Zero-length mem transfer intrinsics can be ignored entirely.
826 return markAsDead(II);
829 return PI.setAborted(&II);
831 insertUse(II, Offset, Length ? Length->getLimitedValue()
832 : AllocSize - Offset.getLimitedValue(),
836 void visitMemTransferInst(MemTransferInst &II) {
837 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
838 if (Length && Length->getValue() == 0)
839 // Zero-length mem transfer intrinsics can be ignored entirely.
840 return markAsDead(II);
842 // Because we can visit these intrinsics twice, also check to see if the
843 // first time marked this instruction as dead. If so, skip it.
844 if (VisitedDeadInsts.count(&II))
848 return PI.setAborted(&II);
850 // This side of the transfer is completely out-of-bounds, and so we can
851 // nuke the entire transfer. However, we also need to nuke the other side
852 // if already added to our partitions.
853 // FIXME: Yet another place we really should bypass this when
854 // instrumenting for ASan.
855 if (Offset.uge(AllocSize)) {
856 SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
857 MemTransferSliceMap.find(&II);
858 if (MTPI != MemTransferSliceMap.end())
859 AS.Slices[MTPI->second].kill();
860 return markAsDead(II);
863 uint64_t RawOffset = Offset.getLimitedValue();
864 uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;
866 // Check for the special case where the same exact value is used for both
868 if (*U == II.getRawDest() && *U == II.getRawSource()) {
869 // For non-volatile transfers this is a no-op.
870 if (!II.isVolatile())
871 return markAsDead(II);
873 return insertUse(II, Offset, Size, /*IsSplittable=*/false);
876 // If we have seen both source and destination for a mem transfer, then
877 // they both point to the same alloca.
879 SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
880 std::tie(MTPI, Inserted) =
881 MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
882 unsigned PrevIdx = MTPI->second;
884 Slice &PrevP = AS.Slices[PrevIdx];
886 // Check if the begin offsets match and this is a non-volatile transfer.
887 // In that case, we can completely elide the transfer.
888 if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
890 return markAsDead(II);
893 // Otherwise we have an offset transfer within the same alloca. We can't
895 PrevP.makeUnsplittable();
898 // Insert the use now that we've fixed up the splittable nature.
899 insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);
901 // Check that we ended up with a valid index in the map.
902 assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
903 "Map index doesn't point back to a slice with this user.");
906 // Disable SRoA for any intrinsics except for lifetime invariants.
907 // FIXME: What about debug intrinsics? This matches old behavior, but
908 // doesn't make sense.
909 void visitIntrinsicInst(IntrinsicInst &II) {
911 return PI.setAborted(&II);
913 if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
914 II.getIntrinsicID() == Intrinsic::lifetime_end) {
915 ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
916 uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
917 Length->getLimitedValue());
918 insertUse(II, Offset, Size, true);
922 Base::visitIntrinsicInst(II);
925 Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
926 // We consider any PHI or select that results in a direct load or store of
927 // the same offset to be a viable use for slicing purposes. These uses
928 // are considered unsplittable and the size is the maximum loaded or stored
930 SmallPtrSet<Instruction *, 4> Visited;
931 SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
932 Visited.insert(Root);
933 Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
934 const DataLayout &DL = Root->getModule()->getDataLayout();
935 // If there are no loads or stores, the access is dead. We mark that as
936 // a size zero access.
939 Instruction *I, *UsedI;
940 std::tie(UsedI, I) = Uses.pop_back_val();
942 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
943 Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
946 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
947 Value *Op = SI->getOperand(0);
950 Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
954 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
955 if (!GEP->hasAllZeroIndices())
957 } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
958 !isa<SelectInst>(I)) {
962 for (User *U : I->users())
963 if (Visited.insert(cast<Instruction>(U)).second)
964 Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
965 } while (!Uses.empty());
970 void visitPHINodeOrSelectInst(Instruction &I) {
971 assert(isa<PHINode>(I) || isa<SelectInst>(I));
973 return markAsDead(I);
975 // TODO: We could use SimplifyInstruction here to fold PHINodes and
    // SelectInsts. However, doing so requires changing the current
977 // dead-operand-tracking mechanism. For instance, suppose neither loading
978 // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
979 // trap either. However, if we simply replace %U with undef using the
980 // current dead-operand-tracking mechanism, "load (select undef, undef,
981 // %other)" may trap because the select may return the first operand
983 if (Value *Result = foldPHINodeOrSelectInst(I)) {
985 // If the result of the constant fold will be the pointer, recurse
986 // through the PHI/select as if we had RAUW'ed it.
989 // Otherwise the operand to the PHI/select is dead, and we can replace
991 AS.DeadOperands.push_back(U);
997 return PI.setAborted(&I);
999 // See if we already have computed info on this node.
1000 uint64_t &Size = PHIOrSelectSizes[&I];
1002 // This is a new PHI/Select, check for an unsafe use of it.
1003 if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
1004 return PI.setAborted(UnsafeI);
1007 // For PHI and select operands outside the alloca, we can't nuke the entire
1008 // phi or select -- the other side might still be relevant, so we special
1009 // case them here and use a separate structure to track the operands
1010 // themselves which should be replaced with undef.
1011 // FIXME: This should instead be escaped in the event we're instrumenting
1012 // for address sanitization.
1013 if (Offset.uge(AllocSize)) {
1014 AS.DeadOperands.push_back(U);
1018 insertUse(I, Offset, Size);
1021 void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }
1023 void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }
1025 /// \brief Disable SROA entirely if there are unhandled users of the alloca.
1026 void visitInstruction(Instruction &I) { PI.setAborted(&I); }
1029 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
1031 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1034 PointerEscapingInstr(nullptr) {
1035 SliceBuilder PB(DL, AI, *this);
1036 SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
1037 if (PtrI.isEscaped() || PtrI.isAborted()) {
1038 // FIXME: We should sink the escape vs. abort info into the caller nicely,
1039 // possibly by just storing the PtrInfo in the AllocaSlices.
1040 PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
1041 : PtrI.getAbortingInst();
1042 assert(PointerEscapingInstr && "Did not track a bad instruction");
1047 llvm::remove_if(Slices, [](const Slice &S) { return S.isDead(); }),
1051 if (SROARandomShuffleSlices) {
1052 std::mt19937 MT(static_cast<unsigned>(
1053 std::chrono::system_clock::now().time_since_epoch().count()));
1054 std::shuffle(Slices.begin(), Slices.end(), MT);
1058 // Sort the uses. This arranges for the offsets to be in ascending order,
1059 // and the sizes to be in descending order.
1060 std::sort(Slices.begin(), Slices.end());
1063 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1065 void AllocaSlices::print(raw_ostream &OS, const_iterator I,
1066 StringRef Indent) const {
1067 printSlice(OS, I, Indent);
1069 printUse(OS, I, Indent);
1072 void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
1073 StringRef Indent) const {
1074 OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
1075 << " slice #" << (I - begin())
1076 << (I->isSplittable() ? " (splittable)" : "");
1079 void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
1080 StringRef Indent) const {
1081 OS << Indent << " used by: " << *I->getUse()->getUser() << "\n";
1084 void AllocaSlices::print(raw_ostream &OS) const {
1085 if (PointerEscapingInstr) {
1086 OS << "Can't analyze slices for alloca: " << AI << "\n"
1087 << " A pointer to this alloca escaped by:\n"
1088 << " " << *PointerEscapingInstr << "\n";
1092 OS << "Slices of alloca: " << AI << "\n";
1093 for (const_iterator I = begin(), E = end(); I != E; ++I)
1097 LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
1100 LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }
1102 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1104 /// Walk the range of a partitioning looking for a common type to cover this
1105 /// sequence of slices.
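/// For example, if one slice accesses the whole partition as an i64 and
/// another as a double, there is no single common type, and the widest
/// byte-width integer type seen (i64 here) is returned instead.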
1106 static Type *findCommonType(AllocaSlices::const_iterator B,
1107 AllocaSlices::const_iterator E,
1108 uint64_t EndOffset) {
1110 bool TyIsCommon = true;
1111 IntegerType *ITy = nullptr;
1113 // Note that we need to look at *every* alloca slice's Use to ensure we
1114 // always get consistent results regardless of the order of slices.
1115 for (AllocaSlices::const_iterator I = B; I != E; ++I) {
1116 Use *U = I->getUse();
1117 if (isa<IntrinsicInst>(*U->getUser()))
1119 if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
1122 Type *UserTy = nullptr;
1123 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1124 UserTy = LI->getType();
1125 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1126 UserTy = SI->getValueOperand()->getType();
1129 if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
1130 // If the type is larger than the partition, skip it. We only encounter
1131 // this for split integer operations where we want to use the type of the
1132 // entity causing the split. Also skip if the type is not a byte width
1134 if (UserITy->getBitWidth() % 8 != 0 ||
1135 UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
1138 // Track the largest bitwidth integer type used in this way in case there
1139 // is no common type.
1140 if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
1144 // To avoid depending on the order of slices, Ty and TyIsCommon must not
1145 // depend on types skipped above.
1146 if (!UserTy || (Ty && Ty != UserTy))
1147 TyIsCommon = false; // Give up on anything but an iN type.
1152 return TyIsCommon ? Ty : ITy;
1155 /// PHI instructions that use an alloca and are subsequently loaded can be
1156 /// rewritten to load both input pointers in the pred blocks and then PHI the
1157 /// results, allowing the load of the alloca to be promoted.
1159 /// %P2 = phi [i32* %Alloca, i32* %Other]
1160 /// %V = load i32* %P2
1162 /// %V1 = load i32* %Alloca -> will be mem2reg'd
1164 /// %V2 = load i32* %Other
1166 /// %V = phi [i32 %V1, i32 %V2]
1168 /// We can do this to a select if its only uses are loads and if the operands
1169 /// to the select can be loaded unconditionally.
1171 /// FIXME: This should be hoisted into a generic utility, likely in
1172 /// Transforms/Util/Local.h
1173 static bool isSafePHIToSpeculate(PHINode &PN) {
1174 // For now, we can only do this promotion if the load is in the same block
1175 // as the PHI, and if there are no stores between the phi and load.
1176 // TODO: Allow recursive phi users.
1177 // TODO: Allow stores.
1178 BasicBlock *BB = PN.getParent();
1179 unsigned MaxAlign = 0;
1180 bool HaveLoad = false;
1181 for (User *U : PN.users()) {
1182 LoadInst *LI = dyn_cast<LoadInst>(U);
1183 if (!LI || !LI->isSimple())
1186 // For now we only allow loads in the same block as the PHI. This is
1187 // a common case that happens when instcombine merges two loads through
1189 if (LI->getParent() != BB)
1192 // Ensure that there are no instructions between the PHI and the load that
1194 for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
1195 if (BBI->mayWriteToMemory())
1198 MaxAlign = std::max(MaxAlign, LI->getAlignment());
1205 const DataLayout &DL = PN.getModule()->getDataLayout();
1207 // We can only transform this if it is safe to push the loads into the
1208 // predecessor blocks. The only thing to watch out for is that we can't put
1209 // a possibly trapping load in the predecessor if it is a critical edge.
1210 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1211 TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
1212 Value *InVal = PN.getIncomingValue(Idx);
1214 // If the value is produced by the terminator of the predecessor (an
1215 // invoke) or it has side-effects, there is no valid place to put a load
1216 // in the predecessor.
1217 if (TI == InVal || TI->mayHaveSideEffects())
1220 // If the predecessor has a single successor, then the edge isn't
1222 if (TI->getNumSuccessors() == 1)
1225 // If this pointer is always safe to load, or if we can prove that there
1226 // is already a load in the block, then we can move the load to the pred
1228 if (isSafeToLoadUnconditionally(InVal, MaxAlign, DL, TI))
1237 static void speculatePHINodeLoads(PHINode &PN) {
1238 DEBUG(dbgs() << " original: " << PN << "\n");
1240 Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
1241 IRBuilderTy PHIBuilder(&PN);
1242 PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
1243 PN.getName() + ".sroa.speculated");
1245 // Get the AA tags and alignment to use from one of the loads. It doesn't
1246 // matter which one we get and if any differ.
1247 LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
  AAMDNodes AATags;
  SomeLoad->getAAMetadata(AATags);
1251 unsigned Align = SomeLoad->getAlignment();
1253 // Rewrite all loads of the PN to use the new PHI.
1254 while (!PN.use_empty()) {
1255 LoadInst *LI = cast<LoadInst>(PN.user_back());
1256 LI->replaceAllUsesWith(NewPN);
1257 LI->eraseFromParent();
1260 // Inject loads into all of the pred blocks.
1261 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1262 BasicBlock *Pred = PN.getIncomingBlock(Idx);
1263 TerminatorInst *TI = Pred->getTerminator();
1264 Value *InVal = PN.getIncomingValue(Idx);
1265 IRBuilderTy PredBuilder(TI);
1267 LoadInst *Load = PredBuilder.CreateLoad(
1268 InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
1269 ++NumLoadsSpeculated;
1270 Load->setAlignment(Align);
    if (AATags)
      Load->setAAMetadata(AATags);
1273 NewPN->addIncoming(Load, Pred);
1276 DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
1277 PN.eraseFromParent();
1280 /// Select instructions that use an alloca and are subsequently loaded can be
1281 /// rewritten to load both input pointers and then select between the result,
1282 /// allowing the load of the alloca to be promoted.
1284 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
1285 /// %V = load i32* %P2
1287 /// %V1 = load i32* %Alloca -> will be mem2reg'd
1288 /// %V2 = load i32* %Other
1289 /// %V = select i1 %cond, i32 %V1, i32 %V2
1291 /// We can do this to a select if its only uses are loads and if the operand
1292 /// to the select can be loaded unconditionally.
1293 static bool isSafeSelectToSpeculate(SelectInst &SI) {
1294 Value *TValue = SI.getTrueValue();
1295 Value *FValue = SI.getFalseValue();
1296 const DataLayout &DL = SI.getModule()->getDataLayout();
1298 for (User *U : SI.users()) {
1299 LoadInst *LI = dyn_cast<LoadInst>(U);
1300 if (!LI || !LI->isSimple())
1303 // Both operands to the select need to be dereferenceable, either
1304 // absolutely (e.g. allocas) or at this point because we can see other
1306 if (!isSafeToLoadUnconditionally(TValue, LI->getAlignment(), DL, LI))
1308 if (!isSafeToLoadUnconditionally(FValue, LI->getAlignment(), DL, LI))
1315 static void speculateSelectInstLoads(SelectInst &SI) {
1316 DEBUG(dbgs() << " original: " << SI << "\n");
1318 IRBuilderTy IRB(&SI);
1319 Value *TV = SI.getTrueValue();
1320 Value *FV = SI.getFalseValue();
1321 // Replace the loads of the select with a select of two loads.
1322 while (!SI.use_empty()) {
1323 LoadInst *LI = cast<LoadInst>(SI.user_back());
1324 assert(LI->isSimple() && "We only speculate simple loads");
1326 IRB.SetInsertPoint(LI);
    LoadInst *TL =
        IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
    LoadInst *FL =
        IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
1331 NumLoadsSpeculated += 2;
1333 // Transfer alignment and AA info if present.
1334 TL->setAlignment(LI->getAlignment());
1335 FL->setAlignment(LI->getAlignment());
    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags) {
      TL->setAAMetadata(Tags);
      FL->setAAMetadata(Tags);
    }
1344 Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
1345 LI->getName() + ".sroa.speculated");
1347 DEBUG(dbgs() << " speculated to: " << *V << "\n");
1348 LI->replaceAllUsesWith(V);
1349 LI->eraseFromParent();
1351 SI.eraseFromParent();
1354 /// \brief Build a GEP out of a base pointer and indices.
1356 /// This will return the BasePtr if that is valid, or build a new GEP
1357 /// instruction using the IRBuilder if GEP-ing is needed.
1358 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
1359 SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
1360 if (Indices.empty())
1363 // A single zero index is a no-op, so check for this and avoid building a GEP
1365 if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
1368 return IRB.CreateInBoundsGEP(nullptr, BasePtr, Indices,
1369 NamePrefix + "sroa_idx");
1372 /// \brief Get a natural GEP off of the BasePtr walking through Ty toward
1373 /// TargetTy without changing the offset of the pointer.
1375 /// This routine assumes we've already established a properly offset GEP with
1376 /// Indices, and arrived at the Ty type. The goal is to continue to GEP with
1377 /// zero-indices down through type layers until we find one the same as
1378 /// TargetTy. If we can't find one with the same type, we at least try to use
1379 /// one with the same size. If none of that works, we just produce the GEP as
1380 /// indicated by Indices to have the correct offset.
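/// For example, walking from Ty = [4 x { i32, i32 }] toward an i32 TargetTy
/// appends two zero indices (array element 0, then struct field 0).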
1381 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
1382 Value *BasePtr, Type *Ty, Type *TargetTy,
1383 SmallVectorImpl<Value *> &Indices,
1386 return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1388 // Pointer size to use for the indices.
1389 unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType());
1391 // See if we can descend into a struct and locate a field with the correct
1393 unsigned NumLayers = 0;
1394 Type *ElementTy = Ty;
1396 if (ElementTy->isPointerTy())
1399 if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
1400 ElementTy = ArrayTy->getElementType();
1401 Indices.push_back(IRB.getIntN(PtrSize, 0));
1402 } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
1403 ElementTy = VectorTy->getElementType();
1404 Indices.push_back(IRB.getInt32(0));
1405 } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
1406 if (STy->element_begin() == STy->element_end())
1407 break; // Nothing left to descend into.
1408 ElementTy = *STy->element_begin();
1409 Indices.push_back(IRB.getInt32(0));
1414 } while (ElementTy != TargetTy);
1415 if (ElementTy != TargetTy)
1416 Indices.erase(Indices.end() - NumLayers, Indices.end());
1418 return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1421 /// \brief Recursively compute indices for a natural GEP.
1423 /// This is the recursive step for getNaturalGEPWithOffset that walks down the
1424 /// element types adding appropriate indices for the GEP.
1425 static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
1426 Value *Ptr, Type *Ty, APInt &Offset,
1428 SmallVectorImpl<Value *> &Indices,
1431 return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
1434 // We can't recurse through pointer types.
1435 if (Ty->isPointerTy())
1438 // We try to analyze GEPs over vectors here, but note that these GEPs are
1439 // extremely poorly defined currently. The long-term goal is to remove GEPing
1440 // over a vector from the IR completely.
1441 if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
1442 unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
1443 if (ElementSizeInBits % 8 != 0) {
1444 // GEPs over non-multiple of 8 size vector elements are invalid.
1447 APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
1448 APInt NumSkippedElements = Offset.sdiv(ElementSize);
1449 if (NumSkippedElements.ugt(VecTy->getNumElements()))
1451 Offset -= NumSkippedElements * ElementSize;
1452 Indices.push_back(IRB.getInt(NumSkippedElements));
1453 return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
1454 Offset, TargetTy, Indices, NamePrefix);
1457 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
1458 Type *ElementTy = ArrTy->getElementType();
1459 APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1460 APInt NumSkippedElements = Offset.sdiv(ElementSize);
1461 if (NumSkippedElements.ugt(ArrTy->getNumElements()))
1464 Offset -= NumSkippedElements * ElementSize;
1465 Indices.push_back(IRB.getInt(NumSkippedElements));
1466 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1467 Indices, NamePrefix);
1470 StructType *STy = dyn_cast<StructType>(Ty);
1474 const StructLayout *SL = DL.getStructLayout(STy);
1475 uint64_t StructOffset = Offset.getZExtValue();
1476 if (StructOffset >= SL->getSizeInBytes())
1478 unsigned Index = SL->getElementContainingOffset(StructOffset);
1479 Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
1480 Type *ElementTy = STy->getElementType(Index);
1481 if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
1482 return nullptr; // The offset points into alignment padding.
1484 Indices.push_back(IRB.getInt32(Index));
1485 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1486 Indices, NamePrefix);
1489 /// \brief Get a natural GEP from a base pointer to a particular offset and
1490 /// resulting in a particular type.
1492 /// The goal is to produce a "natural" looking GEP that works with the existing
1493 /// composite types to arrive at the appropriate offset and element type for
1494 /// a pointer. TargetTy is the element type the returned GEP should point-to if
1495 /// possible. We recurse by decreasing Offset, adding the appropriate index to
1496 /// Indices, and setting Ty to the result subtype.
1498 /// If no natural GEP can be constructed, this function returns null.
1499 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
1500 Value *Ptr, APInt Offset, Type *TargetTy,
1501 SmallVectorImpl<Value *> &Indices,
1503 PointerType *Ty = cast<PointerType>(Ptr->getType());
1505 // Don't consider any GEPs through an i8* as natural unless the TargetTy is
1507 if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
1510 Type *ElementTy = Ty->getElementType();
1511 if (!ElementTy->isSized())
1512 return nullptr; // We can't GEP through an unsized element.
1513 APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1514 if (ElementSize == 0)
1515 return nullptr; // Zero-length arrays can't help us build a natural GEP.
1516 APInt NumSkippedElements = Offset.sdiv(ElementSize);
1518 Offset -= NumSkippedElements * ElementSize;
1519 Indices.push_back(IRB.getInt(NumSkippedElements));
1520 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1521 Indices, NamePrefix);
1524 /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
1525 /// resulting pointer has PointerTy.
1527 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1528 /// and produces the pointer type desired. Where it cannot, it will try to use
1529 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1530 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1531 /// bitcast to the type.
1533 /// The strategy for finding the more natural GEPs is to peel off layers of the
1534 /// pointer, walking back through bit casts and GEPs, searching for a base
1535 /// pointer from which we can compute a natural GEP with the desired
1536 /// properties. The algorithm tries to fold as many constant indices into
1537 /// a single GEP as possible, thus making each GEP more independent of the
1538 /// surrounding code.
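/// For example, adjusting a pointer to an alloca of type { i32, [4 x i8] } by
/// 4 bytes toward an i8* result can use the natural GEP indices (0, 1, 0)
/// rather than falling back to a raw i8* GEP over 4 bytes.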
1539 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1540 APInt Offset, Type *PointerTy, Twine NamePrefix) {
1541 // Even though we don't look through PHI nodes, we could be called on an
1542 // instruction in an unreachable block, which may be on a cycle.
1543 SmallPtrSet<Value *, 4> Visited;
1544 Visited.insert(Ptr);
1545 SmallVector<Value *, 4> Indices;
1547 // We may end up computing an offset pointer that has the wrong type. If we
1548 // never are able to compute one directly that has the correct type, we'll
1549 // fall back to it, so keep it and the base it was computed from around here.
1550 Value *OffsetPtr = nullptr;
1551 Value *OffsetBasePtr;
1553 // Remember any i8 pointer we come across to re-use if we need to do a raw
1555 Value *Int8Ptr = nullptr;
1556 APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1558 Type *TargetTy = PointerTy->getPointerElementType();
1561 // First fold any existing GEPs into the offset.
1562 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1563 APInt GEPOffset(Offset.getBitWidth(), 0);
1564 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1566 Offset += GEPOffset;
1567 Ptr = GEP->getPointerOperand();
1568 if (!Visited.insert(Ptr).second)
1572 // See if we can perform a natural GEP here.
1574 if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1575 Indices, NamePrefix)) {
1576 // If we have a new natural pointer at the offset, clear out any old
1577 // offset pointer we computed. Unless it is the base pointer or
1578 // a non-instruction, we built a GEP we don't need. Zap it.
1579 if (OffsetPtr && OffsetPtr != OffsetBasePtr)
1580 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
          assert(I->use_empty() && "Built a GEP with uses somehow!");
1582 I->eraseFromParent();
1585 OffsetBasePtr = Ptr;
1586 // If we also found a pointer of the right type, we're done.
1587 if (P->getType() == PointerTy)
1591 // Stash this pointer if we've found an i8*.
1592 if (Ptr->getType()->isIntegerTy(8)) {
1594 Int8PtrOffset = Offset;
1597 // Peel off a layer of the pointer and update the offset appropriately.
1598 if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1599 Ptr = cast<Operator>(Ptr)->getOperand(0);
1600 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1601 if (GA->isInterposable())
1603 Ptr = GA->getAliasee();
1607 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1608 } while (Visited.insert(Ptr).second);
1612 Int8Ptr = IRB.CreateBitCast(
1613 Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
1614 NamePrefix + "sroa_raw_cast");
1615 Int8PtrOffset = Offset;
1618 OffsetPtr = Int8PtrOffset == 0
1620 : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
1621 IRB.getInt(Int8PtrOffset),
1622 NamePrefix + "sroa_raw_idx");
1626 // On the off chance we were targeting i8*, guard the bitcast here.
1627 if (Ptr->getType() != PointerTy)
1628 Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast");
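// Illustrative sketch (hypothetical IR, not taken from a test case): with
// typed pointers, asking for byte offset 4 and type i32* on
//   %a = alloca { i32, i32 }
// ideally folds to the natural GEP
//   %p = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
// and only when no such GEP can be built does it fall back to the raw form
//   %raw = bitcast { i32, i32 }* %a to i8*
//   %off = getelementptr inbounds i8, i8* %raw, i64 4   ; "sroa_raw_idx"
//   %p   = bitcast i8* %off to i32*                      ; "sroa_cast"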
1633 /// \brief Compute the adjusted alignment for a load or store from an offset.
1634 static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset,
1635 const DataLayout &DL) {
1638 if (auto *LI = dyn_cast<LoadInst>(I)) {
1639 Alignment = LI->getAlignment();
1641 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
1642 Alignment = SI->getAlignment();
1643 Ty = SI->getValueOperand()->getType();
1645 llvm_unreachable("Only loads and stores are allowed!");
1649 Alignment = DL.getABITypeAlignment(Ty);
1651 return MinAlign(Alignment, Offset);
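// For instance (hypothetical numbers): a load aligned to 16 bytes that is
// re-based at byte offset 4 can only be assumed 4-byte aligned, since
// MinAlign(16, 4) keeps the largest power of two dividing both values.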
1654 /// \brief Test whether we can convert a value from the old to the new type.
1656 /// This predicate should be used to guard calls to convertValue in order to
1657 /// ensure that we only try to convert viable values. The strategy is that we
1658 /// will peel off single element struct and array wrappings to get to an
1659 /// underlying value, and convert that value.
1660 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
1664 // For integer types, we can't handle any bit-width differences. This would
1665 // break both vector conversions with extension and introduce endianness
1666 // issues when used in conjunction with loads and stores.
1667 if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
1668 assert(cast<IntegerType>(OldTy)->getBitWidth() !=
1669 cast<IntegerType>(NewTy)->getBitWidth() &&
1670 "We can't have the same bitwidth for different int types");
1674 if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
1676 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
1679 // We can convert pointers to integers and vice-versa. Same for vectors
1680 // of pointers and integers.
1681 OldTy = OldTy->getScalarType();
1682 NewTy = NewTy->getScalarType();
1683 if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
1684 if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
1685 return cast<PointerType>(NewTy)->getPointerAddressSpace() ==
1686 cast<PointerType>(OldTy)->getPointerAddressSpace();
1689 // We can convert integers to integral pointers, but not to non-integral
1690 // pointers.
1691 if (OldTy->isIntegerTy())
1692 return !DL.isNonIntegralPointerType(NewTy);
1694 // We can convert integral pointers to integers, but non-integral pointers
1695 // need to remain pointers.
1696 if (!DL.isNonIntegralPointerType(OldTy))
1697 return NewTy->isIntegerTy();
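// For illustration (assuming a typical 64-bit DataLayout with integral
// pointers), the predicate behaves roughly as:
//   canConvertValue(DL, i64, <2 x i32>)        --> true  (same size)
//   canConvertValue(DL, i64, i8*)              --> true  (inttoptr is allowed)
//   canConvertValue(DL, i32, i64)              --> false (integer widths differ)
//   canConvertValue(DL, i8*, i8 addrspace(1)*) --> false (address spaces differ)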
1705 /// \brief Generic routine to convert an SSA value to a value of a different
1708 /// This will try various different casting techniques, such as bitcasts,
1709 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1710 /// two types for viability with this routine.
1711 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1713 Type *OldTy = V->getType();
1714 assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
1719 assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
1720 "Integer types must be the exact same to convert.");
1722 // See if we need inttoptr for this type pair. A cast involving both scalars
1723 // and vectors requires an additional bitcast.
1724 if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1725 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1726 if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1727 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1730 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1731 if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1732 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1735 return IRB.CreateIntToPtr(V, NewTy);
1738 // See if we need ptrtoint for this type pair. A cast involving both scalars
1739 // and vectors requires an additional bitcast.
1740 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
1741 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1742 if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1743 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1746 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1747 if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1748 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1751 return IRB.CreatePtrToInt(V, NewTy);
1754 return IRB.CreateBitCast(V, NewTy);
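// A minimal sketch of the two-step lowering above (hypothetical values,
// 64-bit pointers): converting %v of type <2 x i32> to i8* emits
//   %int = bitcast <2 x i32> %v to i64
//   %ptr = inttoptr i64 %int to i8*
// and the opposite direction uses ptrtoint followed by a bitcast.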
1757 /// \brief Test whether the given slice use can be promoted to a vector.
1759 /// This function is called to test each entry in a partition which is slated
1760 /// for a single slice.
1761 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
1763 uint64_t ElementSize,
1764 const DataLayout &DL) {
1765 // First validate the slice offsets.
1766 uint64_t BeginOffset =
1767 std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
1768 uint64_t BeginIndex = BeginOffset / ElementSize;
1769 if (BeginIndex * ElementSize != BeginOffset ||
1770 BeginIndex >= Ty->getNumElements())
1772 uint64_t EndOffset =
1773 std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
1774 uint64_t EndIndex = EndOffset / ElementSize;
1775 if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
1778 assert(EndIndex > BeginIndex && "Empty vector!");
1779 uint64_t NumElements = EndIndex - BeginIndex;
1780 Type *SliceTy = (NumElements == 1)
1781 ? Ty->getElementType()
1782 : VectorType::get(Ty->getElementType(), NumElements);
1785 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
1787 Use *U = S.getUse();
1789 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1790 if (MI->isVolatile())
1792 if (!S.isSplittable())
1793 return false; // Skip any unsplittable intrinsics.
1794 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
1795 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
1796 II->getIntrinsicID() != Intrinsic::lifetime_end)
1798 } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
1799 // Disable vector promotion when there are loads or stores of an FCA.
1801 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1802 if (LI->isVolatile())
1804 Type *LTy = LI->getType();
1805 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
1806 assert(LTy->isIntegerTy());
1809 if (!canConvertValue(DL, SliceTy, LTy))
1811 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1812 if (SI->isVolatile())
1814 Type *STy = SI->getValueOperand()->getType();
1815 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
1816 assert(STy->isIntegerTy());
1819 if (!canConvertValue(DL, STy, SliceTy))
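// Worked example (hypothetical numbers): for a candidate <4 x i32>,
// ElementSize is 4 bytes. A slice covering bytes [4, 12) of the partition
// yields BeginIndex = 1 and EndIndex = 3, i.e. a <2 x i32> SliceTy, while a
// slice covering bytes [2, 6) is rejected because its start is not a
// multiple of the element size.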
1828 /// \brief Test whether the given alloca partitioning and range of slices can be
1829 /// promoted to a vector.
1831 /// This is a quick test to check whether we can rewrite a particular alloca
1832 /// partition (and its newly formed alloca) into a vector alloca with only
1833 /// whole-vector loads and stores such that it could be promoted to a vector
1834 // SSA value. We can only ensure this for a limited set of operations, and we
1835 /// don't want to do the rewrites unless we are confident that the result will
1836 /// be promotable, so we have an early test here.
1837 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
1838 // Collect the candidate types for vector-based promotion. Also track whether
1839 // we have different element types.
1840 SmallVector<VectorType *, 4> CandidateTys;
1841 Type *CommonEltTy = nullptr;
1842 bool HaveCommonEltTy = true;
1843 auto CheckCandidateType = [&](Type *Ty) {
1844 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1845 CandidateTys.push_back(VTy);
1847 CommonEltTy = VTy->getElementType();
1848 else if (CommonEltTy != VTy->getElementType())
1849 HaveCommonEltTy = false;
1852 // Consider any loads or stores that are the exact size of the slice.
1853 for (const Slice &S : P)
1854 if (S.beginOffset() == P.beginOffset() &&
1855 S.endOffset() == P.endOffset()) {
1856 if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
1857 CheckCandidateType(LI->getType());
1858 else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
1859 CheckCandidateType(SI->getValueOperand()->getType());
1862 // If we didn't find a vector type, nothing to do here.
1863 if (CandidateTys.empty())
1866 // Remove non-integer vector types if the candidates did not all share a common element type.
1867 // FIXME: It'd be nice to replace them with integer vector types, but we can't
1868 // do that until all the backends are known to produce good code for all
1869 // integer vector types.
1870 if (!HaveCommonEltTy) {
1871 CandidateTys.erase(
1872     llvm::remove_if(CandidateTys,
1873                     [](VectorType *VTy) {
1874                       return !VTy->getElementType()->isIntegerTy();
1875                     }),
1876     CandidateTys.end());
1878 // If there were no integer vector types, give up.
1879 if (CandidateTys.empty())
1882 // Rank the remaining candidate vector types. This is easy because we know
1883 // they're all integer vectors. We sort by ascending number of elements.
1884 auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
1886 assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) &&
1887 "Cannot have vector types of different sizes!");
1888 assert(RHSTy->getElementType()->isIntegerTy() &&
1889 "All non-integer types eliminated!");
1890 assert(LHSTy->getElementType()->isIntegerTy() &&
1891 "All non-integer types eliminated!");
1892 return RHSTy->getNumElements() < LHSTy->getNumElements();
1894 std::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes);
1895 CandidateTys.erase(
1896     std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
1897 CandidateTys.end());
1899 // The only way to have the same element type in every vector type is to
1900 // have the same vector type. Check that and remove all but one.
1902 for (VectorType *VTy : CandidateTys) {
1903 assert(VTy->getElementType() == CommonEltTy &&
1904 "Unaccounted for element type!");
1905 assert(VTy == CandidateTys[0] &&
1906 "Different vector types with the same element type!");
1909 CandidateTys.resize(1);
1912 // Try each vector type, and return the one which works.
1913 auto CheckVectorTypeForPromotion = [&](VectorType *VTy) {
1914 uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType());
1916 // While the definition of LLVM vectors is bitpacked, we don't support
1917 // element sizes that aren't byte-sized.
1918 if (ElementSize % 8)
1920 assert((DL.getTypeSizeInBits(VTy) % 8) == 0 &&
1921 "vector size not a multiple of element size?");
1924 for (const Slice &S : P)
1925 if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
1928 for (const Slice *S : P.splitSliceTails())
1929 if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
1934 for (VectorType *VTy : CandidateTys)
1935 if (CheckVectorTypeForPromotion(VTy))
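// As a concrete illustration (hypothetical partition): if the partition is
// loaded as <4 x i32> in one place and stored as <2 x i64> in another, the
// element types differ, but both candidates are integer vectors and survive
// the filtering above. They are then ranked by element count, so <2 x i64>
// is tried first and <4 x i32> is only used if some slice cannot be
// rewritten in terms of i64 elements.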
1941 /// \brief Test whether a slice of an alloca is valid for integer widening.
1943 /// This implements the necessary checking for the \c isIntegerWideningViable
1944 /// test below on a single slice of the alloca.
1945 static bool isIntegerWideningViableForSlice(const Slice &S,
1946 uint64_t AllocBeginOffset,
1948 const DataLayout &DL,
1949 bool &WholeAllocaOp) {
1950 uint64_t Size = DL.getTypeStoreSize(AllocaTy);
1952 uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
1953 uint64_t RelEnd = S.endOffset() - AllocBeginOffset;
1955 // We can't reasonably handle cases where the load or store extends past
1956 // the end of the alloca's type and into its padding.
1960 Use *U = S.getUse();
1962 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1963 if (LI->isVolatile())
1965 // We can't handle loads that extend past the allocated memory.
1966 if (DL.getTypeStoreSize(LI->getType()) > Size)
1968 // Note that we don't count vector loads or stores as whole-alloca
1969 // operations which enable integer widening because we would prefer to use
1970 // vector widening instead.
1971 if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
1972 WholeAllocaOp = true;
1973 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
1974 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
1976 } else if (RelBegin != 0 || RelEnd != Size ||
1977 !canConvertValue(DL, AllocaTy, LI->getType())) {
1978 // Non-integer loads need to be convertible from the alloca type so that
1979 // they are promotable.
1982 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1983 Type *ValueTy = SI->getValueOperand()->getType();
1984 if (SI->isVolatile())
1986 // We can't handle stores that extend past the allocated memory.
1987 if (DL.getTypeStoreSize(ValueTy) > Size)
1989 // Note that we don't count vector loads or stores as whole-alloca
1990 // operations which enable integer widening because we would prefer to use
1991 // vector widening instead.
1992 if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
1993 WholeAllocaOp = true;
1994 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
1995 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
1997 } else if (RelBegin != 0 || RelEnd != Size ||
1998 !canConvertValue(DL, ValueTy, AllocaTy)) {
1999 // Non-integer stores need to be convertible to the alloca type so that
2000 // they are promotable.
2003 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
2004 if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
2006 if (!S.isSplittable())
2007 return false; // Skip any unsplittable intrinsics.
2008 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
2009 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
2010 II->getIntrinsicID() != Intrinsic::lifetime_end)
2019 /// \brief Test whether the given alloca partition's integer operations can be
2020 /// widened to promotable ones.
2022 /// This is a quick test to check whether we can rewrite the integer loads and
2023 /// stores to a particular alloca into wider loads and stores and be able to
2024 /// promote the resulting alloca.
2025 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
2026 const DataLayout &DL) {
2027 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
2028 // Don't create integer types larger than the maximum bitwidth.
2029 if (SizeInBits > IntegerType::MAX_INT_BITS)
2032 // Don't try to handle allocas with bit-padding.
2033 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
2036 // We need to ensure that an integer type with the appropriate bitwidth can
2037 // be converted to the alloca type, whatever that is. We don't want to force
2038 // the alloca itself to have an integer type if there is a more suitable one.
2039 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
2040 if (!canConvertValue(DL, AllocaTy, IntTy) ||
2041 !canConvertValue(DL, IntTy, AllocaTy))
2044 // While examining uses, we ensure that the alloca has a covering load or
2045 // store. We don't want to widen the integer operations only to fail to
2046 // promote due to some other unsplittable entry (which we may make splittable
2047 // later). However, if there are only splittable uses, go ahead and assume
2048 // that we cover the alloca.
2049 // FIXME: We shouldn't consider split slices that happen to start in the
2050 // partition here...
2051 bool WholeAllocaOp =
2052 P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits);
2054 for (const Slice &S : P)
2055 if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
2059 for (const Slice *S : P.splitSliceTails())
2060 if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
2064 return WholeAllocaOp;
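// For example (hypothetical partition): an 8-byte partition typed as i64 is
// viable when its uses are an i32 load at offset 0, an i32 store at offset 4,
// and at least one whole-partition i64 load or store (which sets
// WholeAllocaOp); an access whose store size reaches past the 8 bytes
// disqualifies the partition.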
2067 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
2068 IntegerType *Ty, uint64_t Offset,
2069 const Twine &Name) {
2070 DEBUG(dbgs() << " start: " << *V << "\n");
2071 IntegerType *IntTy = cast<IntegerType>(V->getType());
2072 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
2073 "Element extends past full value");
2074 uint64_t ShAmt = 8 * Offset;
2075 if (DL.isBigEndian())
2076 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
2078 V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
2079 DEBUG(dbgs() << " shifted: " << *V << "\n");
2081 assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
2082 "Cannot extract to a larger integer!");
2084 V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
2085 DEBUG(dbgs() << " trunced: " << *V << "\n");
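// Small worked example (assumed values): extracting an i16 at byte offset 2
// from an i64 on a little-endian target uses ShAmt = 8 * 2 = 16:
//   %shift = lshr i64 %v, 16
//   %trunc = trunc i64 %shift to i16
// On a big-endian target the shift amount is 8 * (8 - 2 - 2) = 32 instead.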
2090 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
2091 Value *V, uint64_t Offset, const Twine &Name) {
2092 IntegerType *IntTy = cast<IntegerType>(Old->getType());
2093 IntegerType *Ty = cast<IntegerType>(V->getType());
2094 assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
2095 "Cannot insert a larger integer!");
2096 DEBUG(dbgs() << " start: " << *V << "\n");
2098 V = IRB.CreateZExt(V, IntTy, Name + ".ext");
2099 DEBUG(dbgs() << " extended: " << *V << "\n");
2101 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
2102 "Element store outside of alloca store");
2103 uint64_t ShAmt = 8 * Offset;
2104 if (DL.isBigEndian())
2105 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
2107 V = IRB.CreateShl(V, ShAmt, Name + ".shift");
2108 DEBUG(dbgs() << " shifted: " << *V << "\n");
2111 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
2112 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
2113 Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
2114 DEBUG(dbgs() << " masked: " << *Old << "\n");
2115 V = IRB.CreateOr(Old, V, Name + ".insert");
2116 DEBUG(dbgs() << " inserted: " << *V << "\n");
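// Small worked example (assumed values): inserting an i8 at byte offset 1
// into an i32 on a little-endian target uses ShAmt = 8, so the merge is
//   %ext  = zext i8 %v to i32
//   %shl  = shl i32 %ext, 8
//   %mask = and i32 %old, -65281   ; 0xFFFF00FF == ~(0xFF << 8)
//   %ins  = or i32 %mask, %shl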
2121 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
2122 unsigned EndIndex, const Twine &Name) {
2123 VectorType *VecTy = cast<VectorType>(V->getType());
2124 unsigned NumElements = EndIndex - BeginIndex;
2125 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2127 if (NumElements == VecTy->getNumElements())
2130 if (NumElements == 1) {
2131 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
2133 DEBUG(dbgs() << " extract: " << *V << "\n");
2137 SmallVector<Constant *, 8> Mask;
2138 Mask.reserve(NumElements);
2139 for (unsigned i = BeginIndex; i != EndIndex; ++i)
2140 Mask.push_back(IRB.getInt32(i));
2141 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
2142 ConstantVector::get(Mask), Name + ".extract");
2143 DEBUG(dbgs() << " shuffle: " << *V << "\n");
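// For instance (hypothetical input): extracting elements [1, 3) of a
// <4 x i32> value becomes a two-element shuffle,
//   %ext = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 1, i32 2>
// whereas a single element is pulled out with extractelement directly.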
2147 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
2148 unsigned BeginIndex, const Twine &Name) {
2149 VectorType *VecTy = cast<VectorType>(Old->getType());
2150 assert(VecTy && "Can only insert a vector into a vector");
2152 VectorType *Ty = dyn_cast<VectorType>(V->getType());
2154 // Single element to insert.
2155 V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
2157 DEBUG(dbgs() << " insert: " << *V << "\n");
2161 assert(Ty->getNumElements() <= VecTy->getNumElements() &&
2162 "Too many elements!");
2163 if (Ty->getNumElements() == VecTy->getNumElements()) {
2164 assert(V->getType() == VecTy && "Vector type mismatch");
2167 unsigned EndIndex = BeginIndex + Ty->getNumElements();
2169 // When inserting a smaller vector into the larger to store, we first
2170 // use a shuffle vector to widen it with undef elements, and then
2171 // a second shuffle vector to select between the loaded vector and the
2172 // incoming vector.
2173 SmallVector<Constant *, 8> Mask;
2174 Mask.reserve(VecTy->getNumElements());
2175 for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2176 if (i >= BeginIndex && i < EndIndex)
2177 Mask.push_back(IRB.getInt32(i - BeginIndex));
2179 Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
2180 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
2181 ConstantVector::get(Mask), Name + ".expand");
2182 DEBUG(dbgs() << " shuffle: " << *V << "\n");
2185 for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2186 Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
2188 V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");
2190 DEBUG(dbgs() << " blend: " << *V << "\n");
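// For instance (hypothetical input): inserting a <2 x i32> value %v at
// element 1 of a <4 x i32> first widens %v with undef lanes,
//   %exp = shufflevector <2 x i32> %v, <2 x i32> undef,
//                        <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
// and then keeps lanes 1 and 2 of %exp and lanes 0 and 3 of the old value:
//   %blend = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>,
//                   <4 x i32> %exp, <4 x i32> %old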
2194 /// \brief Visitor to rewrite instructions using a particular slice of an alloca
2195 /// to use a new alloca.
2197 /// Also implements the rewriting to vector-based accesses when the partition
2198 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2199 /// lives here.
2200 class llvm::sroa::AllocaSliceRewriter
2201 : public InstVisitor<AllocaSliceRewriter, bool> {
2202 // Befriend the base class so it can delegate to private visit methods.
2203 friend class InstVisitor<AllocaSliceRewriter, bool>;
2205 using Base = InstVisitor<AllocaSliceRewriter, bool>;
2207 const DataLayout &DL;
2210 AllocaInst &OldAI, &NewAI;
2211 const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
2214 // This is a convenience and flag variable that will be null unless the new
2215 // alloca's integer operations should be widened to this integer type due to
2216 // passing isIntegerWideningViable above. If it is non-null, the desired
2217 // integer type will be stored here for easy access during rewriting.
2220 // If we are rewriting an alloca partition which can be written as pure
2221 // vector operations, we stash extra information here. When VecTy is
2222 // non-null, we have some strict guarantees about the rewritten alloca:
2223 // - The new alloca is exactly the size of the vector type here.
2224 // - The accesses all either map to the entire vector or to a single
2225 //   element.
2226 // - The set of accessing instructions is only one of those handled above
2227 // in isVectorPromotionViable. Generally these are the same access kinds
2228 // which are promotable via mem2reg.
2231 uint64_t ElementSize;
2233 // The original offset of the slice currently being rewritten relative to
2234 // the original alloca.
2235 uint64_t BeginOffset = 0;
2236 uint64_t EndOffset = 0;
2238 // The new offsets of the slice currently being rewritten relative to the
2239 // original alloca.
2240 uint64_t NewBeginOffset, NewEndOffset;
2243 bool IsSplittable = false;
2244 bool IsSplit = false;
2245 Use *OldUse = nullptr;
2246 Instruction *OldPtr = nullptr;
2248 // Track post-rewrite users which are PHI nodes and Selects.
2249 SmallSetVector<PHINode *, 8> &PHIUsers;
2250 SmallSetVector<SelectInst *, 8> &SelectUsers;
2252 // Utility IR builder, whose name prefix is set up for each visited use, and
2253 // the insertion point is set to point to the user.
2257 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass,
2258 AllocaInst &OldAI, AllocaInst &NewAI,
2259 uint64_t NewAllocaBeginOffset,
2260 uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
2261 VectorType *PromotableVecTy,
2262 SmallSetVector<PHINode *, 8> &PHIUsers,
2263 SmallSetVector<SelectInst *, 8> &SelectUsers)
2264 : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
2265 NewAllocaBeginOffset(NewAllocaBeginOffset),
2266 NewAllocaEndOffset(NewAllocaEndOffset),
2267 NewAllocaTy(NewAI.getAllocatedType()),
2268 IntTy(IsIntegerPromotable
2271 DL.getTypeSizeInBits(NewAI.getAllocatedType()))
2273 VecTy(PromotableVecTy),
2274 ElementTy(VecTy ? VecTy->getElementType() : nullptr),
2275 ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
2276 PHIUsers(PHIUsers), SelectUsers(SelectUsers),
2277 IRB(NewAI.getContext(), ConstantFolder()) {
2279 assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
2280 "Only multiple-of-8 sized vector elements are viable");
2283 assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
2286 bool visit(AllocaSlices::const_iterator I) {
2287 bool CanSROA = true;
2288 BeginOffset = I->beginOffset();
2289 EndOffset = I->endOffset();
2290 IsSplittable = I->isSplittable();
2291 IsSplit =
2292     BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
2293 DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : ""));
2294 DEBUG(AS.printSlice(dbgs(), I, ""));
2295 DEBUG(dbgs() << "\n");
2297 // Compute the intersecting offset range.
2298 assert(BeginOffset < NewAllocaEndOffset);
2299 assert(EndOffset > NewAllocaBeginOffset);
2300 NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2301 NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2303 SliceSize = NewEndOffset - NewBeginOffset;
2305 OldUse = I->getUse();
2306 OldPtr = cast<Instruction>(OldUse->get());
2308 Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
2309 IRB.SetInsertPoint(OldUserI);
2310 IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
2311 IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
2313 CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
2320 // Make sure the other visit overloads are visible.
2323 // Every instruction which can end up as a user must have a rewrite rule.
2324 bool visitInstruction(Instruction &I) {
2325 DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
2326 llvm_unreachable("No rewrite rule for this instruction!");
2329 Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
2330 // Note that the offset computation can use BeginOffset or NewBeginOffset
2331 // interchangeably for unsplit slices.
2332 assert(IsSplit || BeginOffset == NewBeginOffset);
2333 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2336 StringRef OldName = OldPtr->getName();
2337 // Skip through the last '.sroa.' component of the name.
2338 size_t LastSROAPrefix = OldName.rfind(".sroa.");
2339 if (LastSROAPrefix != StringRef::npos) {
2340 OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
2341 // Look for an SROA slice index.
2342 size_t IndexEnd = OldName.find_first_not_of("0123456789");
2343 if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
2344 // Strip the index and look for the offset.
2345 OldName = OldName.substr(IndexEnd + 1);
2346 size_t OffsetEnd = OldName.find_first_not_of("0123456789");
2347 if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
2348 // Strip the offset.
2349 OldName = OldName.substr(OffsetEnd + 1);
2352 // Strip any SROA suffixes as well.
2353 OldName = OldName.substr(0, OldName.find(".sroa_"));
2356 return getAdjustedPtr(IRB, DL, &NewAI,
2357 APInt(DL.getPointerTypeSizeInBits(PointerTy), Offset),
2360 Twine(OldName) + "."
2367 /// \brief Compute suitable alignment to access this slice of the *new*
2368 /// alloca.
2370 /// You can optionally pass a type to this routine and if that type's ABI
2371 /// alignment is itself suitable, this will return zero.
2372 unsigned getSliceAlign(Type *Ty = nullptr) {
2373 unsigned NewAIAlign = NewAI.getAlignment();
2374 if (!NewAIAlign)
2375   NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
2376 unsigned Align =
2377     MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
2378 return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
2381 unsigned getIndex(uint64_t Offset) {
2382 assert(VecTy && "Can only call getIndex when rewriting a vector");
2383 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2384 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
2385 uint32_t Index = RelOffset / ElementSize;
2386 assert(Index * ElementSize == RelOffset);
2390 void deleteIfTriviallyDead(Value *V) {
2391 Instruction *I = cast<Instruction>(V);
2392 if (isInstructionTriviallyDead(I))
2393 Pass.DeadInsts.insert(I);
2396 Value *rewriteVectorizedLoadInst() {
2397 unsigned BeginIndex = getIndex(NewBeginOffset);
2398 unsigned EndIndex = getIndex(NewEndOffset);
2399 assert(EndIndex > BeginIndex && "Empty vector!");
2401 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
2402 return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
2405 Value *rewriteIntegerLoad(LoadInst &LI) {
2406 assert(IntTy && "We cannot insert an integer to the alloca");
2407 assert(!LI.isVolatile());
2408 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
2409 V = convertValue(DL, IRB, V, IntTy);
2410 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2411 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2412 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) {
2413 IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8);
2414 V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract");
2416 // It is possible that the extracted type is not the load type. This
2417 // happens if there is a load past the end of the alloca, and as
2418 // a consequence the slice is narrower but still a candidate for integer
2419 // lowering. To handle this case, we just zero extend the extracted
2420 // integer.
2421 assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 &&
2422 "Can only handle an extract for an overly wide load");
2423 if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8)
2424 V = IRB.CreateZExt(V, LI.getType());
2428 bool visitLoadInst(LoadInst &LI) {
2429 DEBUG(dbgs() << " original: " << LI << "\n");
2430 Value *OldOp = LI.getOperand(0);
2431 assert(OldOp == OldPtr);
2434 LI.getAAMetadata(AATags);
2436 unsigned AS = LI.getPointerAddressSpace();
2438 Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
2439                          : LI.getType();
2440 const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize;
2441 bool IsPtrAdjusted = false;
2444 V = rewriteVectorizedLoadInst();
2445 } else if (IntTy && LI.getType()->isIntegerTy()) {
2446 V = rewriteIntegerLoad(LI);
2447 } else if (NewBeginOffset == NewAllocaBeginOffset &&
2448 NewEndOffset == NewAllocaEndOffset &&
2449 (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2450 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2451 TargetTy->isIntegerTy()))) {
2452 LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2453 LI.isVolatile(), LI.getName());
2455 NewLI->setAAMetadata(AATags);
2456 if (LI.isVolatile())
2457 NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2459 // Any !nonnull metadata or !range metadata on the old load is also valid
2460 // on the new load. This is even true in some cases even when the loads
2461 // are different types, for example by mapping !nonnull metadata to
2462 // !range metadata by modeling the null pointer constant converted to the
2463 // integer type.
2464 // FIXME: Add support for range metadata here. Currently the utilities
2465 // for this don't propagate range metadata in trivial cases from one
2466 // integer load to another, don't handle non-addrspace-0 null pointers
2467 // correctly, and don't have any support for mapping ranges as the
2468 // integer type becomes wider or narrower.
2469 if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2470 copyNonnullMetadata(LI, N, *NewLI);
2472 // Try to preserve nonnull metadata
2475 // If this is an integer load past the end of the slice (which means the
2476 // bytes outside the slice are undef or this load is dead) just forcibly
2477 // fix the integer size with correct handling of endianness.
2478 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2479 if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
2480 if (AITy->getBitWidth() < TITy->getBitWidth()) {
2481 V = IRB.CreateZExt(V, TITy, "load.ext");
2482 if (DL.isBigEndian())
2483 V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
2487 Type *LTy = TargetTy->getPointerTo(AS);
2488 LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
2489 getSliceAlign(TargetTy),
2490 LI.isVolatile(), LI.getName());
2492 NewLI->setAAMetadata(AATags);
2493 if (LI.isVolatile())
2494 NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2497 IsPtrAdjusted = true;
2499 V = convertValue(DL, IRB, V, TargetTy);
2502 assert(!LI.isVolatile());
2503 assert(LI.getType()->isIntegerTy() &&
2504 "Only integer type loads and stores are split");
2505 assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
2506 "Split load isn't smaller than original load");
2507 assert(LI.getType()->getIntegerBitWidth() ==
2508 DL.getTypeStoreSizeInBits(LI.getType()) &&
2509 "Non-byte-multiple bit width");
2510 // Move the insertion point just past the load so that we can refer to it.
2511 IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
2512 // Create a placeholder value with the same type as LI to use as the
2513 // basis for the new value. This allows us to replace the uses of LI with
2514 // the computed value, and then replace the placeholder with LI, leaving
2515 // LI only used for this computation.
2516 Value *Placeholder =
2517 new LoadInst(UndefValue::get(LI.getType()->getPointerTo(AS)));
2518 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
2520 LI.replaceAllUsesWith(V);
2521 Placeholder->replaceAllUsesWith(&LI);
2522 Placeholder->deleteValue();
2524 LI.replaceAllUsesWith(V);
2527 Pass.DeadInsts.insert(&LI);
2528 deleteIfTriviallyDead(OldOp);
2529 DEBUG(dbgs() << " to: " << *V << "\n");
2530 return !LI.isVolatile() && !IsPtrAdjusted;
2533 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
2535 if (V->getType() != VecTy) {
2536 unsigned BeginIndex = getIndex(NewBeginOffset);
2537 unsigned EndIndex = getIndex(NewEndOffset);
2538 assert(EndIndex > BeginIndex && "Empty vector!");
2539 unsigned NumElements = EndIndex - BeginIndex;
2540 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2541 Type *SliceTy = (NumElements == 1)
2542                     ? ElementTy
2543 : VectorType::get(ElementTy, NumElements);
2544 if (V->getType() != SliceTy)
2545 V = convertValue(DL, IRB, V, SliceTy);
2547 // Mix in the existing elements.
2548 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
2549 V = insertVector(IRB, Old, V, BeginIndex, "vec");
2551 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2553 Store->setAAMetadata(AATags);
2554 Pass.DeadInsts.insert(&SI);
2556 DEBUG(dbgs() << " to: " << *Store << "\n");
2560 bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) {
2561 assert(IntTy && "We cannot extract an integer from the alloca");
2562 assert(!SI.isVolatile());
2563 if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
2565 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
2566 Old = convertValue(DL, IRB, Old, IntTy);
2567 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2568 uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
2569 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert");
2571 V = convertValue(DL, IRB, V, NewAllocaTy);
2572 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2573 Store->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access);
2575 Store->setAAMetadata(AATags);
2576 Pass.DeadInsts.insert(&SI);
2577 DEBUG(dbgs() << " to: " << *Store << "\n");
2581 bool visitStoreInst(StoreInst &SI) {
2582 DEBUG(dbgs() << " original: " << SI << "\n");
2583 Value *OldOp = SI.getOperand(1);
2584 assert(OldOp == OldPtr);
2587 SI.getAAMetadata(AATags);
2589 Value *V = SI.getValueOperand();
2591 // Strip all inbounds GEPs and pointer casts to try to dig out any root
2592 // alloca that should be re-examined after promoting this alloca.
2593 if (V->getType()->isPointerTy())
2594 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
2595 Pass.PostPromotionWorklist.insert(AI);
2597 if (SliceSize < DL.getTypeStoreSize(V->getType())) {
2598 assert(!SI.isVolatile());
2599 assert(V->getType()->isIntegerTy() &&
2600 "Only integer type loads and stores are split");
2601 assert(V->getType()->getIntegerBitWidth() ==
2602 DL.getTypeStoreSizeInBits(V->getType()) &&
2603 "Non-byte-multiple bit width");
2604 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
2605 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset,
2610 return rewriteVectorizedStoreInst(V, SI, OldOp, AATags);
2611 if (IntTy && V->getType()->isIntegerTy())
2612 return rewriteIntegerStore(V, SI, AATags);
2614 const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize;
2616 if (NewBeginOffset == NewAllocaBeginOffset &&
2617 NewEndOffset == NewAllocaEndOffset &&
2618 (canConvertValue(DL, V->getType(), NewAllocaTy) ||
2619 (IsStorePastEnd && NewAllocaTy->isIntegerTy() &&
2620 V->getType()->isIntegerTy()))) {
2621 // If this is an integer store past the end of the slice (and thus the bytes
2622 // past that point are irrelevant or this is unreachable), truncate the
2623 // value prior to storing.
2624 if (auto *VITy = dyn_cast<IntegerType>(V->getType()))
2625 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2626 if (VITy->getBitWidth() > AITy->getBitWidth()) {
2627 if (DL.isBigEndian())
2628 V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(),
2630 V = IRB.CreateTrunc(V, AITy, "load.trunc");
2633 V = convertValue(DL, IRB, V, NewAllocaTy);
2634 NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2637 unsigned AS = SI.getPointerAddressSpace();
2638 Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
2639 NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
2642 NewSI->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access);
2644 NewSI->setAAMetadata(AATags);
2645 if (SI.isVolatile())
2646 NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
2647 Pass.DeadInsts.insert(&SI);
2648 deleteIfTriviallyDead(OldOp);
2650 DEBUG(dbgs() << " to: " << *NewSI << "\n");
2651 return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
2654 /// \brief Compute an integer value from splatting an i8 across the given
2655 /// number of bytes.
2657 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
2658 /// call this routine.
2659 /// FIXME: Heed the advice above.
2661 /// \param V The i8 value to splat.
2662 /// \param Size The number of bytes in the output (assuming i8 is one byte)
2663 Value *getIntegerSplat(Value *V, unsigned Size) {
2664 assert(Size > 0 && "Expected a positive number of bytes.");
2665 IntegerType *VTy = cast<IntegerType>(V->getType());
2666 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
2670 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8);
2671 V = IRB.CreateMul(
2672     IRB.CreateZExt(V, SplatIntTy, "zext"),
2673 ConstantExpr::getUDiv(
2674 Constant::getAllOnesValue(SplatIntTy),
2675 ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()),
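// Concretely (hypothetical byte value): splatting i8 0xAB across 4 bytes
// multiplies the zero-extended byte by 0x01010101 (the all-ones i32 divided
// by 0xFF), giving 0xABABABAB.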
2681 /// \brief Compute a vector splat for a given element value.
2682 Value *getVectorSplat(Value *V, unsigned NumElements) {
2683 V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
2684 DEBUG(dbgs() << " splat: " << *V << "\n");
2688 bool visitMemSetInst(MemSetInst &II) {
2689 DEBUG(dbgs() << " original: " << II << "\n");
2690 assert(II.getRawDest() == OldPtr);
2693 II.getAAMetadata(AATags);
2695 // If the memset has a variable size, it cannot be split, just adjust the
2696 // pointer to the new alloca.
2697 if (!isa<Constant>(II.getLength())) {
2699 assert(NewBeginOffset == BeginOffset);
2700 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
2701 II.setDestAlignment(getSliceAlign());
2703 deleteIfTriviallyDead(OldPtr);
2707 // Record this instruction for deletion.
2708 Pass.DeadInsts.insert(&II);
2710 Type *AllocaTy = NewAI.getAllocatedType();
2711 Type *ScalarTy = AllocaTy->getScalarType();
2713 // If this doesn't map cleanly onto the alloca type, and that type isn't
2714 // a single value type, just emit a memset.
2715 if (!VecTy && !IntTy &&
2716 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
2717 SliceSize != DL.getTypeStoreSize(AllocaTy) ||
2718 !AllocaTy->isSingleValueType() ||
2719 !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) ||
2720 DL.getTypeSizeInBits(ScalarTy) % 8 != 0)) {
2721 Type *SizeTy = II.getLength()->getType();
2722 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2723 CallInst *New = IRB.CreateMemSet(
2724 getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
2725 getSliceAlign(), II.isVolatile());
2727 New->setAAMetadata(AATags);
2728 DEBUG(dbgs() << " to: " << *New << "\n");
2732 // If we can represent this as a simple value, we have to build the actual
2733 // value to store, which requires expanding the byte present in memset to
2734 // a sensible representation for the alloca type. This is essentially
2735 // splatting the byte to a sufficiently wide integer, splatting it across
2736 // any desired vector width, and bitcasting to the final type.
2740 // If this is a memset of a vectorized alloca, insert it.
2741 assert(ElementTy == ScalarTy);
2743 unsigned BeginIndex = getIndex(NewBeginOffset);
2744 unsigned EndIndex = getIndex(NewEndOffset);
2745 assert(EndIndex > BeginIndex && "Empty vector!");
2746 unsigned NumElements = EndIndex - BeginIndex;
2747 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2750 getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
2751 Splat = convertValue(DL, IRB, Splat, ElementTy);
2752 if (NumElements > 1)
2753 Splat = getVectorSplat(Splat, NumElements);
2756 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
2757 V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2759 // If this is a memset on an alloca where we can widen stores, insert the
2761 assert(!II.isVolatile());
2763 uint64_t Size = NewEndOffset - NewBeginOffset;
2764 V = getIntegerSplat(II.getValue(), Size);
2766 if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2767 EndOffset != NewAllocaEndOffset)) {
2769 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
2770 Old = convertValue(DL, IRB, Old, IntTy);
2771 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2772 V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2774 assert(V->getType() == IntTy &&
2775 "Wrong type for an alloca wide integer!");
2777 V = convertValue(DL, IRB, V, AllocaTy);
2779 // Established these invariants above.
2780 assert(NewBeginOffset == NewAllocaBeginOffset);
2781 assert(NewEndOffset == NewAllocaEndOffset);
2783 V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
2784 if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2785 V = getVectorSplat(V, AllocaVecTy->getNumElements());
2787 V = convertValue(DL, IRB, V, AllocaTy);
2790 StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2793 New->setAAMetadata(AATags);
2794 DEBUG(dbgs() << " to: " << *New << "\n");
2795 return !II.isVolatile();
2798 bool visitMemTransferInst(MemTransferInst &II) {
2799 // Rewriting of memory transfer instructions can be a bit tricky. We break
2800 // them into two categories: split intrinsics and unsplit intrinsics.
2802 DEBUG(dbgs() << " original: " << II << "\n");
2805 II.getAAMetadata(AATags);
2807 bool IsDest = &II.getRawDestUse() == OldUse;
2808 assert((IsDest && II.getRawDest() == OldPtr) ||
2809 (!IsDest && II.getRawSource() == OldPtr));
2811 unsigned SliceAlign = getSliceAlign();
2813 // For unsplit intrinsics, we simply modify the source and destination
2814 // pointers in place. This isn't just an optimization, it is a matter of
2815 // correctness. With unsplit intrinsics we may be dealing with transfers
2816 // within a single alloca before SROA ran, or with transfers that have
2817 // a variable length. We may also be dealing with memmove instead of
2818 // memcpy, and so simply updating the pointers is all that is necessary for
2819 // us to update both source and dest of a single call.
2820 if (!IsSplittable) {
2821 Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2823 II.setDest(AdjustedPtr);
2824 II.setDestAlignment(SliceAlign);
2827 II.setSource(AdjustedPtr);
2828 II.setSourceAlignment(SliceAlign);
2831 DEBUG(dbgs() << " to: " << II << "\n");
2832 deleteIfTriviallyDead(OldPtr);
2835 // For split transfer intrinsics we have an incredibly useful assurance:
2836 // the source and destination do not reside within the same alloca, and at
2837 // least one of them does not escape. This means that we can replace
2838 // memmove with memcpy, and we don't need to worry about all manner of
2839 // downsides to splitting and transforming the operations.
2841 // If this doesn't map cleanly onto the alloca type, and that type isn't
2842 // a single value type, just emit a memcpy.
2845 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
2846 SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) ||
2847 !NewAI.getAllocatedType()->isSingleValueType());
2849 // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2850 // size hasn't been shrunk based on analysis of the viable range, this is
2852 if (EmitMemCpy && &OldAI == &NewAI) {
2853 // Ensure the start lines up.
2854 assert(NewBeginOffset == BeginOffset);
2856 // Rewrite the size as needed.
2857 if (NewEndOffset != EndOffset)
2858 II.setLength(ConstantInt::get(II.getLength()->getType(),
2859 NewEndOffset - NewBeginOffset));
2862 // Record this instruction for deletion.
2863 Pass.DeadInsts.insert(&II);
2865 // Strip all inbounds GEPs and pointer casts to try to dig out any root
2866 // alloca that should be re-examined after rewriting this instruction.
2867 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
2868 if (AllocaInst *AI =
2869 dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
2870 assert(AI != &OldAI && AI != &NewAI &&
2871 "Splittable transfers cannot reach the same alloca on both ends.");
2872 Pass.Worklist.insert(AI);
2875 Type *OtherPtrTy = OtherPtr->getType();
2876 unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
2878 // Compute the relative offset for the other pointer within the transfer.
2879 unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS);
2880 APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
2881 unsigned OtherAlign =
2882 IsDest ? II.getSourceAlignment() : II.getDestAlignment();
2883 OtherAlign = MinAlign(OtherAlign ? OtherAlign : 1,
2884 OtherOffset.zextOrTrunc(64).getZExtValue());
2887 // Compute the other pointer, folding as much as possible to produce
2888 // a single, simple GEP in most cases.
2889 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
2890 OtherPtr->getName() + ".");
2892 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2893 Type *SizeTy = II.getLength()->getType();
2894 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2896 Value *DestPtr, *SrcPtr;
2897 unsigned DestAlign, SrcAlign;
2898 // Note: IsDest is true iff we're copying into the new alloca slice
2901 DestAlign = SliceAlign;
2903 SrcAlign = OtherAlign;
2906 DestAlign = OtherAlign;
2908 SrcAlign = SliceAlign;
2910 CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign,
2911 Size, II.isVolatile());
2913 New->setAAMetadata(AATags);
2914 DEBUG(dbgs() << " to: " << *New << "\n");
2918 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
2919 NewEndOffset == NewAllocaEndOffset;
2920 uint64_t Size = NewEndOffset - NewBeginOffset;
2921 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
2922 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
2923 unsigned NumElements = EndIndex - BeginIndex;
2924 IntegerType *SubIntTy =
2925 IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr;
2927 // Reset the other pointer type to match the register type we're going to
2928 // use, but using the address space of the original other pointer.
2929 if (VecTy && !IsWholeAlloca) {
2930 if (NumElements == 1)
2931 OtherPtrTy = VecTy->getElementType();
2933 OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
2935 OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS);
2936 } else if (IntTy && !IsWholeAlloca) {
2937 OtherPtrTy = SubIntTy->getPointerTo(OtherAS);
2939 OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS);
2942 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
2943 OtherPtr->getName() + ".");
2944 unsigned SrcAlign = OtherAlign;
2945 Value *DstPtr = &NewAI;
2946 unsigned DstAlign = SliceAlign;
2948 std::swap(SrcPtr, DstPtr);
2949 std::swap(SrcAlign, DstAlign);
2953 if (VecTy && !IsWholeAlloca && !IsDest) {
2954 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
2955 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
2956 } else if (IntTy && !IsWholeAlloca && !IsDest) {
2957 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
2958 Src = convertValue(DL, IRB, Src, IntTy);
2959 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2960 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
2962 LoadInst *Load = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
2965 Load->setAAMetadata(AATags);
2969 if (VecTy && !IsWholeAlloca && IsDest) {
2971 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
2972 Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
2973 } else if (IntTy && !IsWholeAlloca && IsDest) {
2975 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
2976 Old = convertValue(DL, IRB, Old, IntTy);
2977 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2978 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
2979 Src = convertValue(DL, IRB, Src, NewAllocaTy);
2982 StoreInst *Store = cast<StoreInst>(
2983 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
2985 Store->setAAMetadata(AATags);
2986 DEBUG(dbgs() << " to: " << *Store << "\n");
2987 return !II.isVolatile();
2990 bool visitIntrinsicInst(IntrinsicInst &II) {
2991 assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
2992 II.getIntrinsicID() == Intrinsic::lifetime_end);
2993 DEBUG(dbgs() << " original: " << II << "\n");
2994 assert(II.getArgOperand(1) == OldPtr);
2996 // Record this instruction for deletion.
2997 Pass.DeadInsts.insert(&II);
2999 // Lifetime intrinsics are only promotable if they cover the whole alloca.
3000 // Therefore, we drop lifetime intrinsics which don't cover the whole
3001 // alloca.
3002 // (In theory, intrinsics which partially cover an alloca could be
3003 // promoted, but PromoteMemToReg doesn't handle that case.)
3004 // FIXME: Check whether the alloca is promotable before dropping the
3005 // lifetime intrinsics?
3006 if (NewBeginOffset != NewAllocaBeginOffset ||
3007 NewEndOffset != NewAllocaEndOffset)
3010 ConstantInt *Size =
3011     ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
3012 NewEndOffset - NewBeginOffset);
3013 Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
3015 if (II.getIntrinsicID() == Intrinsic::lifetime_start)
3016 New = IRB.CreateLifetimeStart(Ptr, Size);
3018 New = IRB.CreateLifetimeEnd(Ptr, Size);
3021 DEBUG(dbgs() << " to: " << *New << "\n");
3026 bool visitPHINode(PHINode &PN) {
3027 DEBUG(dbgs() << " original: " << PN << "\n");
3028 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
3029 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
3031 // We would like to compute a new pointer in only one place, but have it be
3032 // as local as possible to the PHI. To do that, we re-use the location of
3033 // the old pointer, which necessarily must be in the right position to
3034 // dominate the PHI.
3035 IRBuilderTy PtrBuilder(IRB);
3036 if (isa<PHINode>(OldPtr))
3037 PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt());
3039 PtrBuilder.SetInsertPoint(OldPtr);
3040 PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());
3042 Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
3043 // Replace the operands which were using the old pointer.
3044 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
3046 DEBUG(dbgs() << " to: " << PN << "\n");
3047 deleteIfTriviallyDead(OldPtr);
3049 // PHIs can't be promoted on their own, but often can be speculated. We
3050 // check the speculation outside of the rewriter so that we see the
3051 // fully-rewritten alloca.
3052 PHIUsers.insert(&PN);
3056 bool visitSelectInst(SelectInst &SI) {
3057 DEBUG(dbgs() << " original: " << SI << "\n");
3058 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
3059 "Pointer isn't an operand!");
3060 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
3061 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
3063 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
3064 // Replace the operands which were using the old pointer.
3065 if (SI.getOperand(1) == OldPtr)
3066 SI.setOperand(1, NewPtr);
3067 if (SI.getOperand(2) == OldPtr)
3068 SI.setOperand(2, NewPtr);
3070 DEBUG(dbgs() << " to: " << SI << "\n");
3071 deleteIfTriviallyDead(OldPtr);
3073 // Selects can't be promoted on their own, but often can be speculated. We
3074 // check the speculation outside of the rewriter so that we see the
3075 // fully-rewritten alloca.
3076 SelectUsers.insert(&SI);
3083 /// \brief Visitor to rewrite aggregate loads and stores as scalar.
3085 /// This pass aggressively rewrites all aggregate loads and stores on
3086 /// a particular pointer (or any pointer derived from it which we can identify)
3087 /// with scalar loads and stores.
3088 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
3089 // Befriend the base class so it can delegate to private visit methods.
3090 friend class InstVisitor<AggLoadStoreRewriter, bool>;
3092 /// Queue of pointer uses to analyze and potentially rewrite.
3093 SmallVector<Use *, 8> Queue;
3095 /// Set to prevent us from cycling with phi nodes and loops.
3096 SmallPtrSet<User *, 8> Visited;
3098 /// The current pointer use being rewritten. This is used to dig up the used
3099 /// value (as opposed to the user).
3103 /// Rewrite loads and stores through a pointer and all pointers derived from
3104 /// it.
3105 bool rewrite(Instruction &I) {
3106 DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
3108 bool Changed = false;
3109 while (!Queue.empty()) {
3110 U = Queue.pop_back_val();
3111 Changed |= visit(cast<Instruction>(U->getUser()));
3117 /// Enqueue all the users of the given instruction for further processing.
3118 /// This uses a set to de-duplicate users.
3119 void enqueueUsers(Instruction &I) {
3120 for (Use &U : I.uses())
3121 if (Visited.insert(U.getUser()).second)
3122 Queue.push_back(&U);
3125 // Conservative default is to not rewrite anything.
3126 bool visitInstruction(Instruction &I) { return false; }
3128 /// \brief Generic recursive split emission class.
3129 template <typename Derived> class OpSplitter {
3131 /// The builder used to form new instructions.
3134 /// The indices to be used with insert- or extractvalue to select the
3135 /// appropriate value within the aggregate.
3136 SmallVector<unsigned, 4> Indices;
3138 /// The indices to a GEP instruction which will move Ptr to the correct slot
3139 /// within the aggregate.
3140 SmallVector<Value *, 4> GEPIndices;
3142 /// The base pointer of the original op, used as a base for GEPing the
3143 /// split operations.
3146 /// Initialize the splitter with an insertion point and Ptr, and start with a
3147 /// single zero GEP index.
3148 OpSplitter(Instruction *InsertionPoint, Value *Ptr)
3149 : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
3152 /// \brief Generic recursive split emission routine.
3154 /// This method recursively splits an aggregate op (load or store) into
3155 /// scalar or vector ops. It splits recursively until it hits a single value
3156 /// and emits that single value operation via the template argument.
3158 /// The logic of this routine relies on GEPs and insertvalue and
3159 /// extractvalue all operating with the same fundamental index list, merely
3160 /// formatted differently (GEPs need actual values).
3162 /// \param Ty The type being split recursively into smaller ops.
3163 /// \param Agg The aggregate value being built up or stored, depending on
3164 /// whether this is splitting a load or a store respectively.
3165 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
3166 if (Ty->isSingleValueType())
3167 return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
3169 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
3170 unsigned OldSize = Indices.size();
3172 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
3174 assert(Indices.size() == OldSize && "Did not return to the old size");
3175 Indices.push_back(Idx);
3176 GEPIndices.push_back(IRB.getInt32(Idx));
3177 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
3178 GEPIndices.pop_back();
3184 if (StructType *STy = dyn_cast<StructType>(Ty)) {
3185 unsigned OldSize = Indices.size();
3187 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
3189 assert(Indices.size() == OldSize && "Did not return to the old size");
3190 Indices.push_back(Idx);
3191 GEPIndices.push_back(IRB.getInt32(Idx));
3192 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
3193 GEPIndices.pop_back();
3199 llvm_unreachable("Only arrays and structs are aggregate loadable types");
3203 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
3206 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, AAMDNodes AATags)
3207 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr), AATags(AATags) {}
3209 /// Emit a leaf load of a single value. This is called at the leaves of the
3210 /// recursive emission to actually load values.
3211 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
3212 assert(Ty->isSingleValueType());
3213 // Load the single value and insert it using the indices.
      Value *GEP =
          IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep");
3216 LoadInst *Load = IRB.CreateLoad(GEP, Name + ".load");
3218 Load->setAAMetadata(AATags);
3219 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
3220 DEBUG(dbgs() << " to: " << *Load << "\n");
3224 bool visitLoadInst(LoadInst &LI) {
3225 assert(LI.getPointerOperand() == *U);
    if (!LI.isSimple() || LI.getType()->isSingleValueType())
      return false;
3229 // We have an aggregate being loaded, split it apart.
3230 DEBUG(dbgs() << " original: " << LI << "\n");
    AAMDNodes AATags;
    LI.getAAMetadata(AATags);
3233 LoadOpSplitter Splitter(&LI, *U, AATags);
3234 Value *V = UndefValue::get(LI.getType());
3235 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
3236 LI.replaceAllUsesWith(V);
3237 LI.eraseFromParent();
3241 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
3242 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, AAMDNodes AATags)
3243 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr), AATags(AATags) {}
3246 /// Emit a leaf store of a single value. This is called at the leaves of the
3247 /// recursive emission to actually produce stores.
3248 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
3249 assert(Ty->isSingleValueType());
3250 // Extract the single value and store it using the indices.
3252 // The gep and extractvalue values are factored out of the CreateStore
3253 // call to make the output independent of the argument evaluation order.
3254 Value *ExtractValue =
3255 IRB.CreateExtractValue(Agg, Indices, Name + ".extract");
3256 Value *InBoundsGEP =
3257 IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep");
3258 StoreInst *Store = IRB.CreateStore(ExtractValue, InBoundsGEP);
3260 Store->setAAMetadata(AATags);
3261 DEBUG(dbgs() << " to: " << *Store << "\n");
3265 bool visitStoreInst(StoreInst &SI) {
    if (!SI.isSimple() || SI.getPointerOperand() != *U)
      return false;
3268 Value *V = SI.getValueOperand();
    if (V->getType()->isSingleValueType())
      return false;
3272 // We have an aggregate being stored, split it apart.
3273 DEBUG(dbgs() << " original: " << SI << "\n");
    AAMDNodes AATags;
    SI.getAAMetadata(AATags);
3276 StoreOpSplitter Splitter(&SI, *U, AATags);
3277 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
3278 SI.eraseFromParent();
3282 bool visitBitCastInst(BitCastInst &BC) {
3287 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
3292 bool visitPHINode(PHINode &PN) {
3297 bool visitSelectInst(SelectInst &SI) {
3303 } // end anonymous namespace
3305 /// \brief Strip aggregate type wrapping.
3307 /// This removes no-op aggregate types wrapping an underlying type. It will
3308 /// strip as many layers of types as it can without changing either the type
3309 /// size or the allocated size.
3310 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
3311 if (Ty->isSingleValueType())
3314 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3315 uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
3318 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
3319 InnerTy = ArrTy->getElementType();
3320 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
3321 const StructLayout *SL = DL.getStructLayout(STy);
3322 unsigned Index = SL->getElementContainingOffset(0);
3323 InnerTy = STy->getElementType(Index);
3328 if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
3329 TypeSize > DL.getTypeSizeInBits(InnerTy))
3332 return stripAggregateTypeWrapping(DL, InnerTy);
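// A worked example of the stripping logic above (illustrative): an alloca of
// type { { float } } is unwrapped all the way down to float, because peeling
// off each struct layer changes neither the allocated size (4 bytes) nor the
// type size (32 bits). By contrast, { float, [4 x i8] } is left untouched:
// dropping the wrapper would shrink the allocated size from 8 bytes to 4.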
3335 /// \brief Try to find a partition of the aggregate type passed in for a given
3336 /// offset and size.
3338 /// This recurses through the aggregate type and tries to compute a subtype
3339 /// based on the offset and size. When the offset and size span a sub-section
3340 /// of an array, it will even compute a new array type for that sub-section,
3341 /// and the same for structs.
3343 /// Note that this routine is very strict and tries to find a partition of the
3344 /// type which produces the *exact* right offset and size. It is not forgiving
/// when the size or offset causes either end of the type-based partition to be off.
3346 /// Also, this is a best-effort routine. It is reasonable to give up and not
3347 /// return a type if necessary.
static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
                              uint64_t Size) {
3350 if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
3351 return stripAggregateTypeWrapping(DL, Ty);
3352 if (Offset > DL.getTypeAllocSize(Ty) ||
      (DL.getTypeAllocSize(Ty) - Offset) < Size)
    return nullptr;
3356 if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
3357 Type *ElementTy = SeqTy->getElementType();
3358 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
3359 uint64_t NumSkippedElements = Offset / ElementSize;
    if (NumSkippedElements >= SeqTy->getNumElements())
      return nullptr;
3362 Offset -= NumSkippedElements * ElementSize;
3364 // First check if we need to recurse.
3365 if (Offset > 0 || Size < ElementSize) {
3366 // Bail if the partition ends in a different array element.
      if ((Offset + Size) > ElementSize)
        return nullptr;
3369 // Recurse through the element type trying to peel off offset bytes.
3370 return getTypePartition(DL, ElementTy, Offset, Size);
3372 assert(Offset == 0);
3374 if (Size == ElementSize)
3375 return stripAggregateTypeWrapping(DL, ElementTy);
3376 assert(Size > ElementSize);
3377 uint64_t NumElements = Size / ElementSize;
    if (NumElements * ElementSize != Size)
      return nullptr;
3380 return ArrayType::get(ElementTy, NumElements);
  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;
3387 const StructLayout *SL = DL.getStructLayout(STy);
  if (Offset >= SL->getSizeInBytes())
    return nullptr;
3390 uint64_t EndOffset = Offset + Size;
  if (EndOffset > SL->getSizeInBytes())
    return nullptr;
3394 unsigned Index = SL->getElementContainingOffset(Offset);
3395 Offset -= SL->getElementOffset(Index);
3397 Type *ElementTy = STy->getElementType(Index);
3398 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
3399 if (Offset >= ElementSize)
3400 return nullptr; // The offset points into alignment padding.
3402 // See if any partition must be contained by the element.
3403 if (Offset > 0 || Size < ElementSize) {
    if ((Offset + Size) > ElementSize)
      return nullptr;
3406 return getTypePartition(DL, ElementTy, Offset, Size);
3408 assert(Offset == 0);
3410 if (Size == ElementSize)
3411 return stripAggregateTypeWrapping(DL, ElementTy);
3413 StructType::element_iterator EI = STy->element_begin() + Index,
3414 EE = STy->element_end();
3415 if (EndOffset < SL->getSizeInBytes()) {
3416 unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
3417 if (Index == EndIndex)
3418 return nullptr; // Within a single element and its padding.
    // Don't try to form "natural" types if the elements don't line up with the
    // expected size.
3422 // FIXME: We could potentially recurse down through the last element in the
3423 // sub-struct to find a natural end point.
    if (SL->getElementOffset(EndIndex) != EndOffset)
      return nullptr;
3427 assert(Index < EndIndex);
3428 EE = STy->element_begin() + EndIndex;
3431 // Try to build up a sub-structure.
  StructType *SubTy =
      StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked());
3434 const StructLayout *SubSL = DL.getStructLayout(SubTy);
3435 if (Size != SubSL->getSizeInBytes())
    return nullptr; // The sub-struct doesn't have quite the size needed.

  return SubTy;
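// A worked example (illustrative): for an alloca of type [8 x i32], a
// partition at offset 8 with size 16 skips the first two elements, recurses
// with Offset == 0, and since 16 bytes is an exact multiple of the 4-byte
// element size the routine returns the sub-array type [4 x i32]. A partition
// at offset 6 with size 4 straddles an element boundary, so nullptr is
// returned and the caller falls back to an integer or i8-array type.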
3441 /// \brief Pre-split loads and stores to simplify rewriting.
3443 /// We want to break up the splittable load+store pairs as much as
3444 /// possible. This is important to do as a preprocessing step, as once we
3445 /// start rewriting the accesses to partitions of the alloca we lose the
3446 /// necessary information to correctly split apart paired loads and stores
3447 /// which both point into this alloca. The case to consider is something like
3450 /// %a = alloca [12 x i8]
3451 /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
3452 /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
3453 /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
3454 /// %iptr1 = bitcast i8* %gep1 to i64*
3455 /// %iptr2 = bitcast i8* %gep2 to i64*
3456 /// %fptr1 = bitcast i8* %gep1 to float*
3457 /// %fptr2 = bitcast i8* %gep2 to float*
3458 /// %fptr3 = bitcast i8* %gep3 to float*
3459 /// store float 0.0, float* %fptr1
3460 /// store float 1.0, float* %fptr2
3461 /// %v = load i64* %iptr1
3462 /// store i64 %v, i64* %iptr2
3463 /// %f1 = load float* %fptr2
3464 /// %f2 = load float* %fptr3
3466 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
3467 /// promote everything so we recover the 2 SSA values that should have been
3468 /// there all along.
3470 /// \returns true if any changes are made.
3471 bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
3472 DEBUG(dbgs() << "Pre-splitting loads and stores\n");
3474 // Track the loads and stores which are candidates for pre-splitting here, in
3475 // the order they first appear during the partition scan. These give stable
  // iteration order and a basis for tracking which loads and stores we
  // actually split.
3478 SmallVector<LoadInst *, 4> Loads;
3479 SmallVector<StoreInst *, 4> Stores;
3481 // We need to accumulate the splits required of each load or store where we
3482 // can find them via a direct lookup. This is important to cross-check loads
3483 // and stores against each other. We also track the slice so that we can kill
3484 // all the slices that end up split.
3485 struct SplitOffsets {
    Slice *S;
    std::vector<uint64_t> Splits;
3489 SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;
3491 // Track loads out of this alloca which cannot, for any reason, be pre-split.
3492 // This is important as we also cannot pre-split stores of those loads!
3493 // FIXME: This is all pretty gross. It means that we can be more aggressive
3494 // in pre-splitting when the load feeding the store happens to come from
3495 // a separate alloca. Put another way, the effectiveness of SROA would be
3496 // decreased by a frontend which just concatenated all of its local allocas
3497 // into one big flat alloca. But defeating such patterns is exactly the job
  // SROA is tasked with! Sadly, to not have this discrepancy we would have to
3499 // change store pre-splitting to actually force pre-splitting of the load
3500 // that feeds it *and all stores*. That makes pre-splitting much harder, but
3501 // maybe it would make it more principled?
3502 SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
3504 DEBUG(dbgs() << " Searching for candidate loads and stores\n");
3505 for (auto &P : AS.partitions()) {
3506 for (Slice &S : P) {
3507 Instruction *I = cast<Instruction>(S.getUse()->getUser());
3508 if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
3509 // If this is a load we have to track that it can't participate in any
3510 // pre-splitting. If this is a store of a load we have to track that
3511 // that load also can't participate in any pre-splitting.
3512 if (auto *LI = dyn_cast<LoadInst>(I))
3513 UnsplittableLoads.insert(LI);
3514 else if (auto *SI = dyn_cast<StoreInst>(I))
3515 if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand()))
3516 UnsplittableLoads.insert(LI);
3519 assert(P.endOffset() > S.beginOffset() &&
3520 "Empty or backwards partition!");
3522 // Determine if this is a pre-splittable slice.
3523 if (auto *LI = dyn_cast<LoadInst>(I)) {
3524 assert(!LI->isVolatile() && "Cannot split volatile loads!");
3526 // The load must be used exclusively to store into other pointers for
3527 // us to be able to arbitrarily pre-split it. The stores must also be
3528 // simple to avoid changing semantics.
3529 auto IsLoadSimplyStored = [](LoadInst *LI) {
3530 for (User *LU : LI->users()) {
3531 auto *SI = dyn_cast<StoreInst>(LU);
            if (!SI || !SI->isSimple())
              return false;
          }
          return true;
        };
3537 if (!IsLoadSimplyStored(LI)) {
3538 UnsplittableLoads.insert(LI);
3542 Loads.push_back(LI);
3543 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3544 if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex()))
3545 // Skip stores *of* pointers. FIXME: This shouldn't even be possible!
3547 auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand());
3548 if (!StoredLoad || !StoredLoad->isSimple())
3550 assert(!SI->isVolatile() && "Cannot split volatile stores!");
3552 Stores.push_back(SI);
3554 // Other uses cannot be pre-split.
3558 // Record the initial split.
3559 DEBUG(dbgs() << " Candidate: " << *I << "\n");
3560 auto &Offsets = SplitOffsetsMap[I];
3561 assert(Offsets.Splits.empty() &&
3562 "Should not have splits the first time we see an instruction!");
3564 Offsets.Splits.push_back(P.endOffset() - S.beginOffset());
3567 // Now scan the already split slices, and add a split for any of them which
3568 // we're going to pre-split.
3569 for (Slice *S : P.splitSliceTails()) {
3570 auto SplitOffsetsMapI =
3571 SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser()));
3572 if (SplitOffsetsMapI == SplitOffsetsMap.end())
3574 auto &Offsets = SplitOffsetsMapI->second;
3576 assert(Offsets.S == S && "Found a mismatched slice!");
3577 assert(!Offsets.Splits.empty() &&
3578 "Cannot have an empty set of splits on the second partition!");
3579 assert(Offsets.Splits.back() ==
3580 P.beginOffset() - Offsets.S->beginOffset() &&
3581 "Previous split does not end where this one begins!");
3583 // Record each split. The last partition's end isn't needed as the size
3584 // of the slice dictates that.
3585 if (S->endOffset() > P.endOffset())
3586 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset());
3590 // We may have split loads where some of their stores are split stores. For
3591 // such loads and stores, we can only pre-split them if their splits exactly
  // match relative to their starting offset. We have to verify this prior to
  // any rewriting.
  Stores.erase(
      llvm::remove_if(Stores,
3596 [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) {
            // Lookup the load we are storing in our map of split
            // offsets.
3599 auto *LI = cast<LoadInst>(SI->getValueOperand());
3600 // If it was completely unsplittable, then we're done,
3601 // and this store can't be pre-split.
3602 if (UnsplittableLoads.count(LI))
3605 auto LoadOffsetsI = SplitOffsetsMap.find(LI);
3606 if (LoadOffsetsI == SplitOffsetsMap.end())
3607 return false; // Unrelated loads are definitely safe.
3608 auto &LoadOffsets = LoadOffsetsI->second;
3610 // Now lookup the store's offsets.
3611 auto &StoreOffsets = SplitOffsetsMap[SI];
3613 // If the relative offsets of each split in the load and
3614 // store match exactly, then we can split them and we
3615 // don't need to remove them here.
3616 if (LoadOffsets.Splits == StoreOffsets.Splits)
3620 << " Mismatched splits for load and store:\n"
3621 << " " << *LI << "\n"
3622 << " " << *SI << "\n");
3624 // We've found a store and load that we need to split
3625 // with mismatched relative splits. Just give up on them
            // and remove both instructions from our list of
            // candidates.
3628 UnsplittableLoads.insert(LI);
3632 // Now we have to go *back* through all the stores, because a later store may
3633 // have caused an earlier store's load to become unsplittable and if it is
3634 // unsplittable for the later store, then we can't rely on it being split in
3635 // the earlier store either.
3636 Stores.erase(llvm::remove_if(Stores,
3637 [&UnsplittableLoads](StoreInst *SI) {
                                 auto *LI = cast<LoadInst>(SI->getValueOperand());
3640 return UnsplittableLoads.count(LI);
3643 // Once we've established all the loads that can't be split for some reason,
3644 // filter any that made it into our list out.
3645 Loads.erase(llvm::remove_if(Loads,
3646 [&UnsplittableLoads](LoadInst *LI) {
3647 return UnsplittableLoads.count(LI);
  // If no loads or stores are left, there is no pre-splitting to be done for
  // this alloca.
3653 if (Loads.empty() && Stores.empty())
  // From here on, we can't fail and will be building new accesses, so rig up
  // an IR builder.
3658 IRBuilderTy IRB(&AI);
3660 // Collect the new slices which we will merge into the alloca slices.
3661 SmallVector<Slice, 4> NewSlices;
  // Track any allocas we end up splitting loads and stores for so we iterate
  // on them.
3665 SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;
3667 // At this point, we have collected all of the loads and stores we can
3668 // pre-split, and the specific splits needed for them. We actually do the
  // splitting in a specific order in order to handle when one of the loads is
3670 // the value operand to one of the stores.
3672 // First, we rewrite all of the split loads, and just accumulate each split
3673 // load in a parallel structure. We also build the slices for them and append
3674 // them to the alloca slices.
3675 SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap;
3676 std::vector<LoadInst *> SplitLoads;
3677 const DataLayout &DL = AI.getModule()->getDataLayout();
3678 for (LoadInst *LI : Loads) {
3681 IntegerType *Ty = cast<IntegerType>(LI->getType());
3682 uint64_t LoadSize = Ty->getBitWidth() / 8;
3683 assert(LoadSize > 0 && "Cannot have a zero-sized integer load!");
3685 auto &Offsets = SplitOffsetsMap[LI];
3686 assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
3687 "Slice size should always match load size exactly!");
3688 uint64_t BaseOffset = Offsets.S->beginOffset();
3689 assert(BaseOffset + LoadSize > BaseOffset &&
3690 "Cannot represent alloca access size using 64-bit integers!");
3692 Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand());
3693 IRB.SetInsertPoint(LI);
3695 DEBUG(dbgs() << " Splitting load: " << *LI << "\n");
3697 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
3698 int Idx = 0, Size = Offsets.Splits.size();
3700 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
3701 auto AS = LI->getPointerAddressSpace();
3702 auto *PartPtrTy = PartTy->getPointerTo(AS);
3703 LoadInst *PLoad = IRB.CreateAlignedLoad(
3704 getAdjustedPtr(IRB, DL, BasePtr,
3705 APInt(DL.getIndexSizeInBits(AS), PartOffset),
3706 PartPtrTy, BasePtr->getName() + "."),
3707 getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
3709 PLoad->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);
3711 // Append this load onto the list of split loads so we can find it later
3712 // to rewrite the stores.
3713 SplitLoads.push_back(PLoad);
3715 // Now build a new slice for the alloca.
3716 NewSlices.push_back(
3717 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
3718 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()),
3719 /*IsSplittable*/ false));
3720 DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
3721 << ", " << NewSlices.back().endOffset() << "): " << *PLoad
      // See if we've handled all the splits.
      if (Idx >= Size)
        break;

      // Setup the next partition.
      PartOffset = Offsets.Splits[Idx];
      ++Idx;
      PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset;
3734 // Now that we have the split loads, do the slow walk over all uses of the
3735 // load and rewrite them as split stores, or save the split loads to use
3736 // below if the store is going to be split there anyways.
3737 bool DeferredStores = false;
3738 for (User *LU : LI->users()) {
3739 StoreInst *SI = cast<StoreInst>(LU);
3740 if (!Stores.empty() && SplitOffsetsMap.count(SI)) {
3741 DeferredStores = true;
3742 DEBUG(dbgs() << " Deferred splitting of store: " << *SI << "\n");
3746 Value *StoreBasePtr = SI->getPointerOperand();
3747 IRB.SetInsertPoint(SI);
3749 DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n");
3751 for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) {
3752 LoadInst *PLoad = SplitLoads[Idx];
3753 uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
        auto *PartPtrTy =
            PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());
3757 auto AS = SI->getPointerAddressSpace();
3758 StoreInst *PStore = IRB.CreateAlignedStore(
          PLoad,
          getAdjustedPtr(IRB, DL, StoreBasePtr,
3761 APInt(DL.getIndexSizeInBits(AS), PartOffset),
3762 PartPtrTy, StoreBasePtr->getName() + "."),
3763 getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
3764 PStore->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);
3765 DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n");
3768 // We want to immediately iterate on any allocas impacted by splitting
3769 // this store, and we have to track any promotable alloca (indicated by
      // a direct store) as needing to be resplit because it is no longer
      // promotable.
3772 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) {
3773 ResplitPromotableAllocas.insert(OtherAI);
3774 Worklist.insert(OtherAI);
3775 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
3776 StoreBasePtr->stripInBoundsOffsets())) {
3777 Worklist.insert(OtherAI);
3780 // Mark the original store as dead.
3781 DeadInsts.insert(SI);
3784 // Save the split loads if there are deferred stores among the users.
    if (DeferredStores)
      SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads)));
3788 // Mark the original load as dead and kill the original slice.
3789 DeadInsts.insert(LI);
3793 // Second, we rewrite all of the split stores. At this point, we know that
3794 // all loads from this alloca have been split already. For stores of such
3795 // loads, we can simply look up the pre-existing split loads. For stores of
  // other loads, we split those loads first and then write split stores of
  // them.
3798 for (StoreInst *SI : Stores) {
3799 auto *LI = cast<LoadInst>(SI->getValueOperand());
3800 IntegerType *Ty = cast<IntegerType>(LI->getType());
3801 uint64_t StoreSize = Ty->getBitWidth() / 8;
3802 assert(StoreSize > 0 && "Cannot have a zero-sized integer store!");
3804 auto &Offsets = SplitOffsetsMap[SI];
3805 assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
3806 "Slice size should always match load size exactly!");
3807 uint64_t BaseOffset = Offsets.S->beginOffset();
3808 assert(BaseOffset + StoreSize > BaseOffset &&
3809 "Cannot represent alloca access size using 64-bit integers!");
3811 Value *LoadBasePtr = LI->getPointerOperand();
3812 Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand());
3814 DEBUG(dbgs() << " Splitting store: " << *SI << "\n");
3816 // Check whether we have an already split load.
3817 auto SplitLoadsMapI = SplitLoadsMap.find(LI);
3818 std::vector<LoadInst *> *SplitLoads = nullptr;
3819 if (SplitLoadsMapI != SplitLoadsMap.end()) {
3820 SplitLoads = &SplitLoadsMapI->second;
3821 assert(SplitLoads->size() == Offsets.Splits.size() + 1 &&
3822 "Too few split loads for the number of splits in the store!");
3824 DEBUG(dbgs() << " of load: " << *LI << "\n");
3827 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
3828 int Idx = 0, Size = Offsets.Splits.size();
3830 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
3831 auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
3832 auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
3834 // Either lookup a split load or create one.
3837 PLoad = (*SplitLoads)[Idx];
3839 IRB.SetInsertPoint(LI);
3840 auto AS = LI->getPointerAddressSpace();
3841 PLoad = IRB.CreateAlignedLoad(
3842 getAdjustedPtr(IRB, DL, LoadBasePtr,
3843 APInt(DL.getIndexSizeInBits(AS), PartOffset),
3844 LoadPartPtrTy, LoadBasePtr->getName() + "."),
3845 getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
3849 // And store this partition.
3850 IRB.SetInsertPoint(SI);
3851 auto AS = SI->getPointerAddressSpace();
3852 StoreInst *PStore = IRB.CreateAlignedStore(
          PLoad,
          getAdjustedPtr(IRB, DL, StoreBasePtr,
3855 APInt(DL.getIndexSizeInBits(AS), PartOffset),
3856 StorePartPtrTy, StoreBasePtr->getName() + "."),
3857 getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
3859 // Now build a new slice for the alloca.
3860 NewSlices.push_back(
3861 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
3862 &PStore->getOperandUse(PStore->getPointerOperandIndex()),
3863 /*IsSplittable*/ false));
3864 DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
3865 << ", " << NewSlices.back().endOffset() << "): " << *PStore
3868 DEBUG(dbgs() << " of split load: " << *PLoad << "\n");
      // See if we've finished all the splits.
      if (Idx >= Size)
        break;

      // Setup the next partition.
      PartOffset = Offsets.Splits[Idx];
      ++Idx;
      PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;
3881 // We want to immediately iterate on any allocas impacted by splitting
3882 // this load, which is only relevant if it isn't a load of this alloca and
3883 // thus we didn't already split the loads above. We also have to keep track
    // of any promotable allocas we split loads on as they can no longer be
    // promoted.
3887 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) {
3888 assert(OtherAI != &AI && "We can't re-split our own alloca!");
3889 ResplitPromotableAllocas.insert(OtherAI);
3890 Worklist.insert(OtherAI);
3891 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
3892 LoadBasePtr->stripInBoundsOffsets())) {
3893 assert(OtherAI != &AI && "We can't re-split our own alloca!");
3894 Worklist.insert(OtherAI);
3898 // Mark the original store as dead now that we've split it up and kill its
3899 // slice. Note that we leave the original load in place unless this store
3900 // was its only use. It may in turn be split up if it is an alloca load
3901 // for some other alloca, but it may be a normal load. This may introduce
3902 // redundant loads, but where those can be merged the rest of the optimizer
3903 // should handle the merging, and this uncovers SSA splits which is more
3904 // important. In practice, the original loads will almost always be fully
3905 // split and removed eventually, and the splits will be merged by any
3906 // trivial CSE, including instcombine.
3907 if (LI->hasOneUse()) {
3908 assert(*LI->user_begin() == SI && "Single use isn't this store!");
3909 DeadInsts.insert(LI);
3911 DeadInsts.insert(SI);
  // Remove the killed slices that have been pre-split.
3916 AS.erase(llvm::remove_if(AS, [](const Slice &S) { return S.isDead(); }),
  // Insert our new slices. This will sort and merge them into the sorted
  // sequence.
3921 AS.insert(NewSlices);
3923 DEBUG(dbgs() << " Pre-split slices:\n");
3925 for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
3926 DEBUG(AS.print(dbgs(), I, " "));
  // Finally, don't try to promote any allocas that now require re-splitting.
3930 // They have already been added to the worklist above.
  PromotableAllocas.erase(
      llvm::remove_if(
          PromotableAllocas,
          [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
3935 PromotableAllocas.end());
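// To make the function comment's example concrete (an illustrative sketch,
// bitcasts and exact value names elided): the only accesses spanning two of
// the three 4-byte partitions are the i64 load from %iptr1 and the i64 store
// to %iptr2. Pre-splitting rewrites that pair as two i32-sized halves each:
//
//   %v.0 = load i32* %gep1          ; bytes [0, 4) of %a
//   %v.1 = load i32* %gep2          ; bytes [4, 8) of %a
//   store i32 %v.0, i32* %gep2      ; bytes [4, 8) of %a
//   store i32 %v.1, i32* %gep3      ; bytes [8, 12) of %a
//
// after which every slice fits within a single partition and the alloca can
// be fully promoted.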
3940 /// \brief Rewrite an alloca partition's users.
3942 /// This routine drives both of the rewriting goals of the SROA pass. It tries
3943 /// to rewrite uses of an alloca partition to be conducive for SSA value
3944 /// promotion. If the partition needs a new, more refined alloca, this will
3945 /// build that new alloca, preserving as much type information as possible, and
3946 /// rewrite the uses of the old alloca to point at the new one and have the
3947 /// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
                                   Partition &P) {
3952 // Try to compute a friendly type for this partition of the alloca. This
3953 // won't always succeed, in which case we fall back to a legal integer type
3954 // or an i8 array of an appropriate size.
3955 Type *SliceTy = nullptr;
3956 const DataLayout &DL = AI.getModule()->getDataLayout();
3957 if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset()))
3958 if (DL.getTypeAllocSize(CommonUseTy) >= P.size())
3959 SliceTy = CommonUseTy;
3961 if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
3962 P.beginOffset(), P.size()))
3963 SliceTy = TypePartitionTy;
3964 if ((!SliceTy || (SliceTy->isArrayTy() &&
3965 SliceTy->getArrayElementType()->isIntegerTy())) &&
3966 DL.isLegalInteger(P.size() * 8))
3967 SliceTy = Type::getIntNTy(*C, P.size() * 8);
  if (!SliceTy)
    SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
3970 assert(DL.getTypeAllocSize(SliceTy) >= P.size());
3972 bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);
  VectorType *VecTy =
      IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
3979 // Check for the case where we're going to rewrite to a new alloca of the
3980 // exact same type as the original, and with the same access offsets. In that
3981 // case, re-use the existing alloca, but still run through the rewriter to
3982 // perform phi and select speculation.
3983 // P.beginOffset() can be non-zero even with the same type in a case with
3984 // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
  AllocaInst *NewAI;
  if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
    NewAI = &AI;
3988 // FIXME: We should be able to bail at this point with "nothing changed".
3989 // FIXME: We might want to defer PHI speculation until after here.
3990 // FIXME: return nullptr;
3992 unsigned Alignment = AI.getAlignment();
3994 // The minimum alignment which users can rely on when the explicit
    // alignment is omitted or zero is that required by the ABI for this
    // type.
3997 Alignment = DL.getABITypeAlignment(AI.getAllocatedType());
3999 Alignment = MinAlign(Alignment, P.beginOffset());
4000 // If we will get at least this much alignment from the type alone, leave
4001 // the alloca's alignment unconstrained.
4002 if (Alignment <= DL.getABITypeAlignment(SliceTy))
4004 NewAI = new AllocaInst(
4005 SliceTy, AI.getType()->getAddressSpace(), nullptr, Alignment,
4006 AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
4010 DEBUG(dbgs() << "Rewriting alloca partition "
4011 << "[" << P.beginOffset() << "," << P.endOffset()
4012 << ") to: " << *NewAI << "\n");
4014 // Track the high watermark on the worklist as it is only relevant for
4015 // promoted allocas. We will reset it to this point if the alloca is not in
4016 // fact scheduled for promotion.
4017 unsigned PPWOldSize = PostPromotionWorklist.size();
4018 unsigned NumUses = 0;
4019 SmallSetVector<PHINode *, 8> PHIUsers;
4020 SmallSetVector<SelectInst *, 8> SelectUsers;
4022 AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
4023 P.endOffset(), IsIntegerPromotable, VecTy,
4024 PHIUsers, SelectUsers);
4025 bool Promotable = true;
4026 for (Slice *S : P.splitSliceTails()) {
4027 Promotable &= Rewriter.visit(S);
4030 for (Slice &S : P) {
4031 Promotable &= Rewriter.visit(&S);
4035 NumAllocaPartitionUses += NumUses;
4036 MaxUsesPerAllocaPartition.updateMax(NumUses);
4038 // Now that we've processed all the slices in the new partition, check if any
4039 // PHIs or Selects would block promotion.
4040 for (PHINode *PHI : PHIUsers)
4041 if (!isSafePHIToSpeculate(*PHI)) {
4044 SelectUsers.clear();
4048 for (SelectInst *Sel : SelectUsers)
4049 if (!isSafeSelectToSpeculate(*Sel)) {
4052 SelectUsers.clear();
4057 if (PHIUsers.empty() && SelectUsers.empty()) {
4058 // Promote the alloca.
4059 PromotableAllocas.push_back(NewAI);
4061 // If we have either PHIs or Selects to speculate, add them to those
    // worklists and re-queue the new alloca so that we promote it on the
    // next iteration.
4064 for (PHINode *PHIUser : PHIUsers)
4065 SpeculatablePHIs.insert(PHIUser);
4066 for (SelectInst *SelectUser : SelectUsers)
4067 SpeculatableSelects.insert(SelectUser);
4068 Worklist.insert(NewAI);
4071 // Drop any post-promotion work items if promotion didn't happen.
4072 while (PostPromotionWorklist.size() > PPWOldSize)
4073 PostPromotionWorklist.pop_back();
    // We couldn't promote and we didn't create a new partition, nothing
    // changed.
4080 // If we can't promote the alloca, iterate on it to check for new
4081 // refinements exposed by splitting the current alloca. Don't iterate on an
4082 // alloca which didn't actually change and didn't get promoted.
4083 Worklist.insert(NewAI);
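// As an illustrative example of the slice-type selection above: a 4-byte
// partition at offset 0 of %a = alloca [12 x i8], absent a richer common use
// type, gets the type partition [4 x i8]; because that is an array of
// integers and (on most targets) i32 is a legal integer type, SliceTy is
// switched to i32 and the partition is rebuilt as something like
//
//   %a.sroa.0 = alloca i32
//
// which the integer-widening rewriter can then usually promote to an SSA
// value.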
/// \brief Walks the slices of an alloca and forms partitions based on them,
4090 /// rewriting each of their uses.
4091 bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
4092 if (AS.begin() == AS.end())
4095 unsigned NumPartitions = 0;
4096 bool Changed = false;
4097 const DataLayout &DL = AI.getModule()->getDataLayout();
4099 // First try to pre-split loads and stores.
4100 Changed |= presplitLoadsAndStores(AI, AS);
4102 // Now that we have identified any pre-splitting opportunities,
4103 // mark loads and stores unsplittable except for the following case.
4104 // We leave a slice splittable if all other slices are disjoint or fully
4105 // included in the slice, such as whole-alloca loads and stores.
4106 // If we fail to split these during pre-splitting, we want to force them
4107 // to be rewritten into a partition.
4108 bool IsSorted = true;
4110 uint64_t AllocaSize = DL.getTypeAllocSize(AI.getAllocatedType());
4111 const uint64_t MaxBitVectorSize = 1024;
4112 if (AllocaSize <= MaxBitVectorSize) {
4113 // If a byte boundary is included in any load or store, a slice starting or
4114 // ending at the boundary is not splittable.
4115 SmallBitVector SplittableOffset(AllocaSize + 1, true);
    for (Slice &S : AS)
      for (unsigned O = S.beginOffset() + 1;
4118 O < S.endOffset() && O < AllocaSize; O++)
4119 SplittableOffset.reset(O);
4121 for (Slice &S : AS) {
4122 if (!S.isSplittable())
4125 if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
4126 (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
4129 if (isa<LoadInst>(S.getUse()->getUser()) ||
4130 isa<StoreInst>(S.getUse()->getUser())) {
4131 S.makeUnsplittable();
4137 // We only allow whole-alloca splittable loads and stores
    // for a large alloca to avoid creating too large a BitVector.
4139 for (Slice &S : AS) {
4140 if (!S.isSplittable())
4143 if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
4146 if (isa<LoadInst>(S.getUse()->getUser()) ||
4147 isa<StoreInst>(S.getUse()->getUser())) {
4148 S.makeUnsplittable();
  if (!IsSorted)
    std::sort(AS.begin(), AS.end());
  /// Describes the allocas introduced by rewritePartition to migrate debug info.
  struct Fragment {
    AllocaInst *Alloca;
    uint64_t Offset, Size;
    Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
        : Alloca(AI), Offset(O), Size(S) {}
  };
4166 SmallVector<Fragment, 4> Fragments;
4168 // Rewrite each partition.
4169 for (auto &P : AS.partitions()) {
4170 if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
4173 uint64_t SizeOfByte = 8;
4174 uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType());
4175 // Don't include any padding.
4176 uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
4177 Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
4183 NumAllocaPartitions += NumPartitions;
4184 MaxPartitionsPerAlloca.updateMax(NumPartitions);
4186 // Migrate debug information from the old alloca to the new alloca(s)
4187 // and the individual partitions.
4188 TinyPtrVector<DbgInfoIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
4189 if (!DbgDeclares.empty()) {
4190 auto *Var = DbgDeclares.front()->getVariable();
4191 auto *Expr = DbgDeclares.front()->getExpression();
4192 auto VarSize = Var->getSizeInBits();
4193 DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
4194 uint64_t AllocaSize = DL.getTypeSizeInBits(AI.getAllocatedType());
4195 for (auto Fragment : Fragments) {
4196 // Create a fragment expression describing the new partition or reuse AI's
4197 // expression if there is only one partition.
4198 auto *FragmentExpr = Expr;
4199 if (Fragment.Size < AllocaSize || Expr->isFragment()) {
4200 // If this alloca is already a scalar replacement of a larger aggregate,
4201 // Fragment.Offset describes the offset inside the scalar.
4202 auto ExprFragment = Expr->getFragmentInfo();
4203 uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
4204 uint64_t Start = Offset + Fragment.Offset;
4205 uint64_t Size = Fragment.Size;
        uint64_t AbsEnd =
            ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
4209 if (Start >= AbsEnd)
          // No need to describe a SROAed padding.
          continue;
4212 Size = std::min(Size, AbsEnd - Start);
4214 // The new, smaller fragment is stenciled out from the old fragment.
4215 if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
4216 assert(Start >= OrigFragment->OffsetInBits &&
4217 "new fragment is outside of original fragment");
4218 Start -= OrigFragment->OffsetInBits;
4221 // The alloca may be larger than the variable.
4223 if (Size > *VarSize)
4225 if (Size == 0 || Start + Size > *VarSize)
4229 // Avoid creating a fragment expression that covers the entire variable.
4230 if (!VarSize || *VarSize != Size) {
        if (auto E =
                DIExpression::createFragmentExpression(Expr, Start, Size))
          FragmentExpr = *E;
4239 // Remove any existing intrinsics describing the same alloca.
4240 for (DbgInfoIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca))
4241 OldDII->eraseFromParent();
4243 DIB.insertDeclare(Fragment.Alloca, Var, FragmentExpr,
4244 DbgDeclares.front()->getDebugLoc(), &AI);
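// For instance (an illustrative sketch): if the original alloca described a
// single 64-bit variable and was split into two 32-bit partitions, the
// migrated declarations end up looking roughly like
//
//   call void @llvm.dbg.declare(metadata i32* %a.sroa.0, metadata !var,
//          metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32))
//   call void @llvm.dbg.declare(metadata i32* %a.sroa.4, metadata !var,
//          metadata !DIExpression(DW_OP_LLVM_fragment, 32, 32))
//
// with each DW_OP_LLVM_fragment giving the bit offset and bit size of the
// piece of the variable that the new alloca covers.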
4250 /// \brief Clobber a use with undef, deleting the used value if it becomes dead.
4251 void SROA::clobberUse(Use &U) {
  Value *OldV = U.get();
  // Replace the use with an undef value.
4254 U = UndefValue::get(OldV->getType());
4256 // Check for this making an instruction dead. We have to garbage collect
4257 // all the dead instructions to ensure the uses of any alloca end up being
4259 if (Instruction *OldI = dyn_cast<Instruction>(OldV))
4260 if (isInstructionTriviallyDead(OldI)) {
4261 DeadInsts.insert(OldI);
4265 /// \brief Analyze an alloca for SROA.
4267 /// This analyzes the alloca to ensure we can reason about it, builds
4268 /// the slices of the alloca, and then hands it off to be split and
4269 /// rewritten as needed.
4270 bool SROA::runOnAlloca(AllocaInst &AI) {
4271 DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
4272 ++NumAllocasAnalyzed;
4274 // Special case dead allocas, as they're trivial.
4275 if (AI.use_empty()) {
4276 AI.eraseFromParent();
4279 const DataLayout &DL = AI.getModule()->getDataLayout();
4281 // Skip alloca forms that this analysis can't handle.
4282 if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
4283 DL.getTypeAllocSize(AI.getAllocatedType()) == 0)
4286 bool Changed = false;
4288 // First, split any FCA loads and stores touching this alloca to promote
4289 // better splitting and promotion opportunities.
4290 AggLoadStoreRewriter AggRewriter;
4291 Changed |= AggRewriter.rewrite(AI);
4293 // Build the slices using a recursive instruction-visiting builder.
4294 AllocaSlices AS(DL, AI);
4295 DEBUG(AS.print(dbgs()));
4299 // Delete all the dead users of this alloca before splitting and rewriting it.
4300 for (Instruction *DeadUser : AS.getDeadUsers()) {
4301 // Free up everything used by this instruction.
4302 for (Use &DeadOp : DeadUser->operands())
4305 // Now replace the uses of this instruction.
4306 DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType()));
4308 // And mark it for deletion.
4309 DeadInsts.insert(DeadUser);
4312 for (Use *DeadOp : AS.getDeadOperands()) {
4313 clobberUse(*DeadOp);
4317 // No slices to split. Leave the dead alloca for a later pass to clean up.
4318 if (AS.begin() == AS.end())
4321 Changed |= splitAlloca(AI, AS);
4323 DEBUG(dbgs() << " Speculating PHIs\n");
4324 while (!SpeculatablePHIs.empty())
4325 speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());
4327 DEBUG(dbgs() << " Speculating Selects\n");
4328 while (!SpeculatableSelects.empty())
4329 speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());
4334 /// \brief Delete the dead instructions accumulated in this run.
4336 /// Recursively deletes the dead instructions we've accumulated. This is done
4337 /// at the very end to maximize locality of the recursive delete and to
4338 /// minimize the problems of invalidated instruction pointers as such pointers
4339 /// are used heavily in the intermediate stages of the algorithm.
4341 /// We also record the alloca instructions deleted here so that they aren't
4342 /// subsequently handed to mem2reg to promote.
4343 bool SROA::deleteDeadInstructions(
4344 SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
4345 bool Changed = false;
4346 while (!DeadInsts.empty()) {
4347 Instruction *I = DeadInsts.pop_back_val();
4348 DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
4350 // If the instruction is an alloca, find the possible dbg.declare connected
4351 // to it, and remove it too. We must do this before calling RAUW or we will
4352 // not be able to find it.
4353 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
4354 DeletedAllocas.insert(AI);
4355 for (DbgInfoIntrinsic *OldDII : FindDbgAddrUses(AI))
4356 OldDII->eraseFromParent();
4359 I->replaceAllUsesWith(UndefValue::get(I->getType()));
4361 for (Use &Operand : I->operands())
4362 if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
        Operand = nullptr;
4365 if (isInstructionTriviallyDead(U))
4366 DeadInsts.insert(U);
4370 I->eraseFromParent();
4376 /// \brief Promote the allocas, using the best available technique.
4378 /// This attempts to promote whatever allocas have been identified as viable in
4379 /// the PromotableAllocas list. If that list is empty, there is nothing to do.
4380 /// This function returns whether any promotion occurred.
4381 bool SROA::promoteAllocas(Function &F) {
4382 if (PromotableAllocas.empty())
4385 NumPromoted += PromotableAllocas.size();
4387 DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
4388 PromoteMemToReg(PromotableAllocas, *DT, AC);
4389 PromotableAllocas.clear();
4393 PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
4394 AssumptionCache &RunAC) {
4395 DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DT = &RunDT;
  AC = &RunAC;
4400 BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I)
4403 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
4404 Worklist.insert(AI);
4407 bool Changed = false;
4408 // A set of deleted alloca instruction pointers which should be removed from
4409 // the list of promotable allocas.
4410 SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
  do {
    while (!Worklist.empty()) {
4414 Changed |= runOnAlloca(*Worklist.pop_back_val());
4415 Changed |= deleteDeadInstructions(DeletedAllocas);
4417 // Remove the deleted allocas from various lists so that we don't try to
4418 // continue processing them.
4419 if (!DeletedAllocas.empty()) {
4420 auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
4421 Worklist.remove_if(IsInSet);
4422 PostPromotionWorklist.remove_if(IsInSet);
4423 PromotableAllocas.erase(llvm::remove_if(PromotableAllocas, IsInSet),
4424 PromotableAllocas.end());
4425 DeletedAllocas.clear();
4429 Changed |= promoteAllocas(F);
4431 Worklist = PostPromotionWorklist;
4432 PostPromotionWorklist.clear();
4433 } while (!Worklist.empty());
  if (!Changed)
    return PreservedAnalyses::all();
4438 PreservedAnalyses PA;
4439 PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  return PA;
4444 PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) {
4445 return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
4446 AM.getResult<AssumptionAnalysis>(F));
4449 /// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
/// This is in the llvm namespace purely to allow it to be a friend of the \c
/// SROA pass.
4453 class llvm::sroa::SROALegacyPass : public FunctionPass {
  /// The SROA implementation.
  SROA Impl;

public:
  static char ID;
4460 SROALegacyPass() : FunctionPass(ID) {
4461 initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
4464 bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
4468 auto PA = Impl.runImpl(
4469 F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
4470 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
4471 return !PA.areAllPreserved();
4474 void getAnalysisUsage(AnalysisUsage &AU) const override {
4475 AU.addRequired<AssumptionCacheTracker>();
4476 AU.addRequired<DominatorTreeWrapperPass>();
4477 AU.addPreserved<GlobalsAAWrapperPass>();
4478 AU.setPreservesCFG();
4481 StringRef getPassName() const override { return "SROA"; }
4484 char SROALegacyPass::ID = 0;
4486 FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }
4488 INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
4489 "Scalar Replacement Of Aggregates", false, false)
4490 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4491 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)
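// Usage note (not part of the pass implementation): SROA is scheduled by the
// standard optimization pipelines, and can be exercised in isolation on a
// test case with, for example:
//
//   opt -sroa -S input.ll          ; legacy pass manager
//   opt -passes=sroa -S input.ll   ; new pass manager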