LLVM: lib/Analysis/Loads.cpp Source File
//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;
cl::opt<bool> UseDerefAtPointSemantics(
    "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false),
    cl::desc("Deref attributes and metadata infer facts at definition only"));

static bool isAligned(const Value *Base, Align Alignment,
                      const DataLayout &DL) {
  return Base->getPointerAlignment(DL) >= Alignment;
}
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  auto IsKnownDeref = [&]() {
    bool CheckForNonNull, CheckForFreed;
    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                    CheckForFreed)) ||
        CheckForFreed)
      return false;
    if (CheckForNonNull &&
        !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
      return false;
    // When using something like !dereferenceable on a load, the
    // dereferenceability may only be valid on a specific control-flow path.
    // If the instruction doesn't dominate the context instruction, we're
    // asking about dereferenceability under the assumption that the
    // instruction has been speculated to the point of the context
    // instruction, in which case we don't know if the dereferenceability
    // info still holds.
    // We don't bother handling allocas here, as they are handled by the
    // getPointerDereferenceableBytes path above.
    auto *I = dyn_cast<Instruction>(V);
    if (I && !isa<AllocaInst>(I))
      return CtxI && isValidAssumeForContext(I, CtxI, DT);
    return true;
  };
  if (IsKnownDeref()) {
    // As we recursed through GEPs to get here, we've incrementally checked
    // that each step advanced by a multiple of the alignment. If our base is
    // properly aligned, then the original offset accessed must also be.
    return isAligned(V, Alignment, DL);
  }

  /// TODO refactor this function to be able to search independently for
  /// Dereferenceability and Alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, RoundToAlign ABI breaks the
    // abstraction.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        return isAligned(V, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  if (CtxI && (!UseDerefAtPointSemantics || !V->canBeFreed())) {
    /// Look through assumes to see if both dereferencability and alignment can
    /// be proven by an assume if needed.
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    bool IsAligned = V->getPointerAlignment(DL) >= Alignment;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI, DT))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
              if (IsAligned && DerefRK &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking.
              return false;  // Other assumes may have better information, so
                             // keep looking.
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}
bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation will do).  It's unclear if this is the
  // desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}
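
// === Added example (not in the upstream file) ===============================
// A minimal sketch of how a client pass might use the wrapper above before
// speculating a load to a new program point. The helper name
// `canSpeculateLoadBefore` is hypothetical.
//
//   static bool canSpeculateLoadBefore(LoadInst &LI, Instruction &InsertPt,
//                                      AssumptionCache *AC,
//                                      const DominatorTree *DT) {
//     const DataLayout &DL = LI.getDataLayout();
//     APInt Size(DL.getPointerTypeSizeInBits(LI.getPointerOperandType()),
//                DL.getTypeStoreSize(LI.getType()));
//     return isDereferenceableAndAlignedPointer(
//         LI.getPointerOperand(), LI.getAlign(), Size, DL, &InsertPt, AC, DT);
//   }
// ============================================================================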
bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              Align Alignment,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              AssumptionCache *AC,
                                              const DominatorTree *DT,
                                              const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}
bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}
/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
bool llvm::isDereferenceableAndAlignedInLoop(
    LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
    AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  const Align Alignment = LI->getAlign();
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(
        Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
        &DT);

  const SCEV *PtrScev = SE.getSCEV(Ptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);

  // Check to see if we have a repeating access pattern and it's possible
  // to prove all accesses are well aligned.
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;

  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case found which warrants
  if (EltSize.urem(Alignment.value()) != 0)
    return false;

  // TODO: Handle overlapping accesses.
  if (EltSize.ugt(Step->getAPInt().abs()))
    return false;

  const SCEV *MaxBECount =
      Predicates ? SE.getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
                 : SE.getConstantMaxBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(MaxBECount))
    return false;

  const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
      L, PtrScev, LI->getType(), MaxBECount, &SE, nullptr);
  if (isa<SCEVCouldNotCompute>(AccessStart) ||
      isa<SCEVCouldNotCompute>(AccessEnd))
    return false;

  // Try to get the access size.
  const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
  APInt MaxPtrDiff = SE.getUnsignedRangeMax(PtrDiff);

  Value *Base = nullptr;
  APInt AccessSize;
  if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
    Base = NewBase->getValue();
    AccessSize = MaxPtrDiff;
  } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
    if (MinAdd->getNumOperands() != 2)
      return false;

    const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
    if (!Offset || !NewBase)
      return false;

    // The following code below assumes the offset is unsigned, but GEP
    // offsets are treated as signed so we can end up here with negative
    // offsets.
    if (Offset->getAPInt().isNegative())
      return false;

    // For the moment, restrict ourselves to the case where the offset is a
    // multiple of the requested alignment and the base is aligned.
    // TODO: generalize if a case found which warrants
    if (Offset->getAPInt().urem(Alignment.value()) != 0)
      return false;

    AccessSize = MaxPtrDiff + Offset->getAPInt();
    Base = NewBase->getValue();
  } else
    return false;

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}
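
// === Added example (not in the upstream file) ===============================
// A sketch of a vectorizer-style query: if the analysis above fails, the
// candidate load needs predication (or must be rejected). The helper name
// `loadNeedsPredication` is hypothetical.
//
//   static bool loadNeedsPredication(LoadInst *LI, Loop *L,
//                                    ScalarEvolution &SE, DominatorTree &DT,
//                                    AssumptionCache *AC) {
//     SmallVector<const SCEVPredicate *, 4> Predicates;
//     return !isDereferenceableAndAlignedInLoop(LI, L, SE, DT, AC,
//                                               &Predicates);
//   }
// ============================================================================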
static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
  const Function &F = *CtxI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
}
/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
                                       const APInt &Size, const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make context-sensitive query
  const Instruction* CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI)) {
    // With sanitizers `Dereferenceable` is not always enough for
    // unconditional load.
    if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
      return true;
  }

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}
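
// === Added example (not in the upstream file) ===============================
// A sketch of the classic if-conversion use: a conditional load may only be
// made unconditional when it provably cannot trap at the branch point. The
// helper name `okToIfConvertLoad` is hypothetical.
//
//   static bool okToIfConvertLoad(LoadInst &LI, Instruction &BranchPoint,
//                                 AssumptionCache *AC,
//                                 const DominatorTree *DT) {
//     const DataLayout &DL = LI.getDataLayout();
//     return isSafeToLoadUnconditionally(LI.getPointerOperand(), LI.getType(),
//                                        LI.getAlign(), DL, &BranchPoint, AC,
//                                        DT);
//   }
// ============================================================================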
bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}
/// DefMaxInstsToScan - the default number of maximum instructions to scan in
/// the block, used by FindAvailableLoadedValue().
cl::opt<unsigned> llvm::DefMaxInstsToScan(
    "available-load-scan-limit", cl::init(6), cl::Hidden,
    cl::desc("Use this to specify the default maximum number of instructions "
             "to scan backward from a given instruction, when searching for "
             "available loaded value"));
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoadCSE,
                                      unsigned *NumScanedInst) {
  // Don't CSE load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA,
                                   IsLoadCSE, NumScanedInst);
}
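
// === Added example (not in the upstream file) ===============================
// The typical load-CSE pattern built on the entry point above: scan backwards
// from the load itself and reuse an earlier load/store of the same address.
//
//   BasicBlock::iterator ScanFrom = Load->getIterator();
//   bool IsLoadCSE = false;
//   if (Value *V = FindAvailableLoadedValue(Load, Load->getParent(), ScanFrom,
//                                           DefMaxInstsToScan, /*AA=*/nullptr,
//                                           &IsLoadCSE))
//     ; // V holds the same bytes Load would produce; Load can be replaced.
// ============================================================================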
// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}
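
// === Added example (not in the upstream file) ===============================
// In IR terms, the helper above lets the scan step over a store like this:
//
//   %a = getelementptr inbounds i8, ptr %base, i64 0  ; store i32 -> [0,4)
//   %b = getelementptr inbounds i8, ptr %base, i64 4  ; load  i32 -> [4,8)
//
// Same base, constant offsets, and [0,4) does not intersect [4,8), so the
// store cannot clobber the load even without alias analysis.
// ============================================================================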
static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}
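
// === Added example (not in the upstream file) ===============================
// Concrete instance of the memset forwarding above: after
// `memset(p, 0xAB, 8)`, a `load i32, ptr %p` is answered with the splat
// APInt::getSplat(32, APInt(8, 0xAB)) == 0xABABABAB, since the 8-byte
// memset length covers the 4 loaded bytes.
// ============================================================================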
Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but if the load and the store have the
        // same base, constant offsets and non-overlapping access ranges,
        // ignore the store. This is a simple form of alias analysis that is
        // used by the inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store that may or may not alias the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}
// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that
// only feeds into them.
static bool isPointerUseReplacable(const Use &U) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst, PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}
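
// === Added example (not in the upstream file) ===============================
// A use that only feeds a comparison is replaceable by this walk, e.g. the
// use of %p in
//
//   %c = icmp eq ptr %p, %q
//
// while a use as a load or store address is not, and must instead satisfy
// isPointerAlwaysReplaceable below.
// ============================================================================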
// Returns true if `To` is a null pointer, constant dereferenceable pointer or
// both pointers have the same underlying objects.
static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}
bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  assert(U->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!U->getType()->isPointerTy())
    return true;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;
  return isPointerUseReplacable(U);
}
bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPointerTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}
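
// === Added example (not in the upstream file) ===============================
// A GVN-style caller that has proven `From == To` would guard the rewrite:
//
//   if (canReplacePointersIfEqual(From, To, DL))
//     From->replaceAllUsesWith(To);
//
// The guard matters because pointers that compare equal may still differ in
// provenance.
// ============================================================================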
bool llvm::isDereferenceableReadOnlyLoop(
    Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
          return false;
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
        return false;
    }
  }
  return true;
}
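
// === Added example (not in the upstream file) ===============================
// A sketch of an early-exit-vectorization-style query over a whole loop,
// collecting the SCEV predicates under which the answer holds:
//
//   SmallVector<const SCEVPredicate *, 4> Predicates;
//   bool Safe = isDereferenceableReadOnlyLoop(L, &SE, &DT, &AC, &Predicates);
// ============================================================================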