//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;
/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));
/// SearchLimitReached / SearchTimes shows how often the limit to
/// decompose GEPs is reached. It will affect the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;
bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}
//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//
/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}
/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (ImmutableCallSite(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}
/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}
/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}
/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}
//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//
/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts and returns V as a Value*,
/// and returns whether we looked through any sign or zero extends. The
/// incoming Value is known to have IntegerType, and it may already be sign or
/// zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }
  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }
  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}
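
// For example (illustrative, names not from this file): for
//   %v = add nsw i32 %x, 4
// GetLinearExpression above returns %x with Scale = 1 and Offset = 4, leaving
// NSW true; for %v = shl i32 %x, 2 it returns %x with Scale = 4, Offset = 0.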
/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32b programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}
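
// For example (illustrative): with PointerSize == 32, an Offset of 0xFFFFFFFF
// is shifted up by 32 bits and arithmetically shifted back down, yielding -1,
// i.e. the two's complement value a 32-bit program would actually compute.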
/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }
    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with the returns attribute), it is crucial
        // to use this function, as it is kept in sync with CaptureTracking.
        // Not using it may cause weird miscompilations where two aliasing
        // pointers are assumed to not alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }
    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
          DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
          DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }
      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // All GEP math happens in the width of the pointer type, so we can
      // truncate the value to 64 bits as we don't currently handle pointers
      // larger than 64 bits and we would crash later.
      // TODO: Make `Scale` an APInt to avoid this problem.
      if (IndexScale.getBitWidth() > 64)
        IndexScale = IndexScale.sextOrTrunc(64);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();
      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }
    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);
  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}
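
// For example (illustrative IR, not from this file): DecomposeGEPExpression
// above decomposes
//   %a = getelementptr inbounds {i32, [8 x i32]}, {i32, [8 x i32]}* %p,
//        i64 0, i32 1, i64 %i
// into Base = %p, StructOffset = 4 (field 1), OtherOffset = 0, and a single
// variable index {V = %i, Scale = 4}.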
/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}
/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // results than the generic analysis.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (CS.onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}
/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}
/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}
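
// For reference: memset_pattern16(void *b, const void *pattern16, size_t len)
// writes only through its first argument, which is why isWriteOnlyParam above
// treats ArgIdx == 0 as the write-only destination parameter.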
ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}
static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}
#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif
AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);

  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}
/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack. We cannot
  // exclude byval arguments here; these belong to the caller of the current
  // function, not to the current function, and a tail callee may reference
  // them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return ModRefInfo::NoModRef;
  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the
  // pointer as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }
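
  // For example (illustrative): if Object is a non-escaping alloca and the
  // call is "call void @f(i8* nocapture readonly %p)" where %p must-aliases
  // Object, the loop above leaves Result == Ref with IsMustAlias still true,
  // so we return setMust(Ref) rather than a conservative ModRef.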
  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }
  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }
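
  // For example (illustrative): for
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i1 false)
  // a Loc that must-aliases %src yields Ref only: the no-overlap guarantee
  // means the write through %dst cannot touch it.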
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  //
  // NB! This function is *not* commutative, so we special-case the two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}
/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize V1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize V2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;
  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);
  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.
    //
    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
    Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
    if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
      // If one of the indices is a PHI node, be safe and only use
      // computeKnownBits so we don't make any assumptions about the
      // relationships between the two indices. This is important if we're
      // asking about values from different loop iterations. See PR32314.
      // TODO: We may be able to change the check so we only do this when
      // we definitely looked through a PHINode.
      if (GEP1LastIdx != GEP2LastIdx &&
          GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
        KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
        KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
        if (Known1.Zero.intersects(Known2.One) ||
            Known1.One.intersects(Known2.Zero))
          return NoAlias;
      }
    } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
      return NoAlias;

    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }
  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.
  //
  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}
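
// For example (illustrative IR, not from this file): with
//   %f0 = getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 0
//   %f1 = getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 1
// the last indices are distinct constants indexing into the same struct, so
// accesses confined to each field are disambiguated to NoAlias above.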
// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random can not be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize ObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
    return false;

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset from the
  // base; with variable indices we can't compute an exact GEP offset to
  // compare against, so return false in that case.
  if (!DecompGEP.VarIndices.empty())
    return false;
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}
/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult
BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
                        const AAMDNodes &V1AAInfo, const Value *V2,
                        LocationSize V2Size, const AAMDNodes &V2AAInfo,
                        const Value *UnderlyingV1, const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
    DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
    DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");
  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        // If the max search depth is reached the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }
    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.
    //
    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, MemoryLocation::UnknownSize,
                               V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias GEP base pointer, conservatively returns MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done
      // through a pointer value associated with an address range of the
      // memory access, otherwise the behavior is undefined.".
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }
  // In the two GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEP's (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...).
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }
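
  // For example (illustrative): if GEP1 is V2 + 4 (GEP1BaseOffset == 4) and
  // V2Size == 8, the two accesses overlap in bytes [4, 8) and the block above
  // returns PartialAlias; with GEP1BaseOffset >= 8 it returns NoAlias.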
  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;
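
    // For example (illustrative): for &A[i][1] vs. &A[42][0] with 4-byte
    // elements in 16-byte rows, Modulo == 16 and ModOffset == 4; with
    // V1Size == V2Size == 4, both ModOffset >= V2Size and
    // V1Size <= Modulo - ModOffset hold, so the accesses can never overlap.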
    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}
static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}
/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
                                       LocationSize SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, LocationSize V2Size,
                                       const AAMDNodes &V2AAInfo,
                                       const Value *UnderV2) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                 SISize, SIAAInfo, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
                 UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}
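
// For example (illustrative IR, not from this file): given
//   %s1 = select i1 %c, i8* %a, i8* %b
//   %s2 = select i1 %c, i8* %a, i8* %b
// the same-condition fast path in aliasSelect above compares %a with %a and
// %b with %b, so %s1 and %s2 merge to MustAlias even if %a and %b are
// unrelated to each other.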
/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value *, 4> UniqueSrc;
  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any of the sources is itself a PHI, return MayAlias conservatively
      // to avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes, in which case this is O(m x n) time where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;

    if (EnableRecPhiAnalysis)
      if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
        // Check whether the incoming value is a GEP that advances the pointer
        // result of this PHI node (e.g. in a loop). If this is the case, we
        // would recurse and always get a MayAlias. Handle this case specially
        // below.
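        //
        // For example (illustrative):
        //
        //   loop:
        //     %p      = phi i8* [ %base, %entry ], [ %p.next, %loop ]
        //     %p.next = getelementptr inbounds i8, i8* %p, i64 4
        //
        // where %p.next feeds back into %p, advancing it by a constant
        // stride on each iteration.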
        if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
            isa<ConstantInt>(PV1GEP->idx_begin())) {
          isRecursive = true;
          continue;
        }
      }

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = MemoryLocation::UnknownSize;

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
                 PNSize, PNAAInfo, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node alias V2 the same way (NoAlias or
  // MustAlias), return that result; otherwise return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      return MayAlias;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      LocationSize V2Size, AAMDNodes V2AAInfo,
                                      const Value *O1, const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-constant identified objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;
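
    // For instance (illustrative): a pointer argument can never alias an
    // alloca in the same function, because the caller has no way to pass in
    // a pointer to memory the callee has not yet allocated.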

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
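  //
  // For example (illustrative): an 8-byte load cannot be loading from a
  // 4-byte 'alloca i32' without reading past the end of the object, so a
  // pointer used by such an access cannot point into that alloca.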
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((V1Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O2, V1Size, DL, TLI, NullIsValidLocation)) ||
      (V2Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O1, V2Size, DL, TLI, NullIsValidLocation)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize &&
        (isObjectSize(O1, V1Size, DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size, DL, TLI, NullIsValidLocation)))
      return AliasCache[Locs] = PartialAlias;

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
  return AliasCache[Locs] = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
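///
/// For instance (illustrative), in
///
///   loop:
///     %p = phi i8* [ %a, %entry ], [ %p.inc, %loop ]
///     %p.inc = getelementptr i8, i8* %p, i64 1
///
/// the name %p denotes a different pointer on each iteration, so plain
/// pointer equality of two occurrences of %p seen on different sides of the
/// phi would not be enough to conclude MustAlias.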
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
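///
/// As an illustrative example: with Dest = { 4 * %x } and Src = { 1 * %x }
/// (matching extension bits), the %x entry in Dest becomes 4 - 1 == 3; had
/// the scales been equal, the entry would have been removed instead.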
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have
    // more than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices, LocationSize V1Size,
    LocationSize V2Size, int64_t BaseOffset, AssumptionCache *AC,
    DominatorTree *DT) {
  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return false;

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5
  // is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
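  //
  // Continuing the "add i3 %i, 5" illustration with a scale of 1:
  // MinDiff == umin(5, 8 - 5) == 3, so MinDiffBytes == 3 * 1 == 3.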

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F));
}
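
// (Usage note, illustrative: under the new pass manager this result is
// normally reached through the AAManager aggregation, e.g. by requesting
// AM.getResult<AAManager>(F), rather than by querying BasicAA directly.)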

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(),
      F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}