LLVM: lib/Analysis/Loads.cpp Source File

//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

static bool isAligned(const Value *Base, Align Alignment,
                      const DataLayout &DL) {
  return Base->getPointerAlignment(DL) >= Alignment;
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited?  Bail out, we've found a self-referential value.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align,
    // then the GEP (== Base + Offset) is also aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  auto IsKnownDeref = [&]() {
    bool CheckForNonNull, CheckForFreed;
    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                    CheckForFreed)) ||
        CheckForFreed)
      return false;
    if (CheckForNonNull &&
        !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
      return false;
    // When using something like !dereferenceable on a load, the
    // dereferenceability may only be valid on a specific control-flow path.
    // If the instruction doesn't dominate the context instruction, we're
    // asking about dereferenceability under the assumption that the
    // instruction has been speculated to the point of the context instruction,
    // in which case we don't know if the dereferenceability info still holds.
    // We don't bother handling allocas and other instructions that aren't
    // loads here, as the metadata on them isn't control-flow dependent.
    auto *I = dyn_cast<Instruction>(V);
    if (I && !isa<AllocaInst>(I))
      return CtxI && isValidAssumeForContext(I, CtxI, DT);
    return true;
  };
  if (IsKnownDeref()) {
    // As we recursed through GEPs to get here, we've incrementally checked
    // that each step advanced by a multiple of the alignment. If our base is
    // properly aligned, then the original offset accessed must also be.
    return isAligned(V, Alignment, DL);
  }

  /// TODO refactor this function to be able to search independently for
  /// Dereferenceability and Alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that.  For the moment, it's conservatively safe to
    // continue to use RoundToAlign.
    Opts.RoundToAlign = true;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        return isAligned(V, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  if (CtxI) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume if needed.
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    bool IsAligned = V->getPointerAlignment(DL) >= Alignment;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI, DT))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
              if (IsAligned && DerefRK &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking.
              return false;  // Other assumes may have better information, so
                             // keep looking.
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do).  It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}
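//===----------------------------------------------------------------------===//
// Illustrative usage sketch -- not part of Loads.cpp. A minimal sketch of how
// a client pass might query the wrapper above before speculating a 4-byte
// access; the function name is hypothetical.
//===----------------------------------------------------------------------===//
static bool canSpeculateWordAccess(const Value *Ptr, Instruction *InsertPt,
                                   const DataLayout &DL, AssumptionCache *AC,
                                   const DominatorTree *DT) {
  // Passing a context instruction (plus AC/DT) is what enables the
  // assume-based and dominance-sensitive reasoning in the recursive walk.
  APInt Size(DL.getIndexTypeSizeInBits(Ptr->getType()), 4);
  return isDereferenceableAndAlignedPointer(Ptr, Align(4), Size, DL, InsertPt,
                                            AC, DT);
}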

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(
    LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
    AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  const Align Alignment = LI->getAlign();
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, AC, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  auto TC = SE.getSmallConstantMaxTripCount(L, Predicates);
  if (!TC)
    return false;

  // TODO: Handle overlapping accesses.
  if (EltSize.sgt(Step->getAPInt()))
    return false;

  // Compute the total access size for access patterns with unit stride and
  // patterns with gaps. For patterns with unit stride, Step and EltSize are
  // the same.
  // For patterns with gaps (i.e. non unit stride), we are
  // accessing EltSize bytes at every Step.
  APInt AccessSize = TC * Step->getAPInt();

  assert(SE.isLoopInvariant(AddRec->getStart(), L) &&
         "implied by addrec definition");
  Value *Base = nullptr;
  if (auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart())) {
    Base = StartS->getValue();
  } else if (auto *StartS = dyn_cast<SCEVAddExpr>(AddRec->getStart())) {
    // Handle (NewBase + offset) as start value.
    const auto *Offset = dyn_cast<SCEVConstant>(StartS->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(StartS->getOperand(1));
    if (StartS->getNumOperands() == 2 && Offset && NewBase) {
      // The following code below assumes the offset is unsigned, but GEP
      // offsets are treated as signed so we can end up here with a negative
      // index.
      if (Offset->getAPInt().isNegative())
        return false;

      // For the moment, restrict ourselves to the case where the offset is a
      // multiple of the requested alignment and the base is aligned.
      // TODO: generalize if a case found which warrants
      if (Offset->getAPInt().urem(Alignment.value()) != 0)
        return false;
      Base = NewBase->getValue();
      bool Overflow = false;
      AccessSize = AccessSize.uadd_ov(Offset->getAPInt(), Overflow);
      if (Overflow)
        return false;
    }
  }

  if (!Base)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case found which warrants
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}
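//===----------------------------------------------------------------------===//
// Illustrative usage sketch -- not part of Loads.cpp. A minimal sketch of how
// a loop transform that wants to execute a load on every iteration (e.g. for
// if-conversion) might gate on the routine above; the function name is
// hypothetical.
//===----------------------------------------------------------------------===//
static bool canHoistLoopLoad(LoadInst *LI, Loop *L, ScalarEvolution &SE,
                             DominatorTree &DT, AssumptionCache &AC) {
  // Proves both ends of the strided access range stay inside one
  // dereferenceable object, so the load cannot fault mid-loop.
  return isDereferenceableAndAlignedInLoop(LI, L, SE, DT, &AC);
}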

static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
  const Function &F = *CtxI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
}

/// Return true if we know that executing a load from this value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before
/// ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing ScanFrom, to determine
/// if the address is already accessed.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
                                       const APInt &Size, const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make context-sensitive query
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI)) {
    // With sanitizers `Dereferenceable` is not always enough for unconditional
    // load.
    if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
      return true;
  }

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}
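//===----------------------------------------------------------------------===//
// Illustrative usage sketch -- not part of Loads.cpp. A minimal sketch of a
// SimplifyCFG-style client deciding whether a load may be hoisted above the
// branch guarding it; the function name is hypothetical.
//===----------------------------------------------------------------------===//
static bool canHoistGuardedLoad(LoadInst *LI, Instruction *BranchPt,
                                const DataLayout &DL, AssumptionCache *AC,
                                const DominatorTree *DT) {
  // Even when no dereferenceability attribute is available, the ScanFrom
  // fallback above succeeds if the same address was already loaded or stored
  // earlier in the block: that prior access would have trapped first.
  return isSafeToLoadUnconditionally(LI->getPointerOperand(), LI->getType(),
                                     LI->getAlign(), DL, BranchPt, AC, DT);
}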

// DefMaxInstsToScan - the default number of maximum instructions
// to scan in the block, used by FindAvailableLoadedValue().
// FindAvailableLoadedValue() was introduced in r60148, to improve jump
// threading in part by eliminating partially redundant loads.
// At that point, the value of MaxInstsToScan was already set to '6'
// without documented explanation.
cl::opt<unsigned> llvm::DefMaxInstsToScan(
    "available-load-scan-limit", cl::init(6), cl::Hidden,
    cl::desc("Use this to specify the default maximum number of instructions "
             "to scan backward from a given instruction, when searching for "
             "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoadCSE,
                                      unsigned *NumScanedInst) {
  // Don't CSE load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA,
                                   IsLoadCSE, NumScanedInst);
}
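//===----------------------------------------------------------------------===//
// Illustrative usage sketch -- not part of Loads.cpp. A minimal sketch of the
// classic jump-threading/GVN-style client: forward an earlier load or store
// to eliminate `Load`. The function name is hypothetical.
//===----------------------------------------------------------------------===//
static bool tryForwardAvailableValue(LoadInst *Load) {
  BasicBlock::iterator ScanFrom = Load->getIterator();
  bool IsLoadCSE = false;
  Value *V = FindAvailableLoadedValue(Load, Load->getParent(), ScanFrom,
                                      DefMaxInstsToScan, /*AA=*/nullptr,
                                      &IsLoadCSE);
  if (!V)
    return false;
  // IsLoadCSE tells the caller whether this was load-load CSE (true) or
  // store-to-load forwarding (false); real clients use that when deciding
  // how to merge metadata onto the replacement value.
  Load->replaceAllUsesWith(V);
  Load->eraseFromParent();
  return true;
}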

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}
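// Worked example for the range check above (illustrative): a load of i32 at
// (Base + 4) gives LoadRange [4, 8) and a store of i64 at (Base + 8) gives
// StoreRange [8, 16). The intersection is empty, so the store provably does
// not clobber the load even with no alias analysis available. A load of i32
// at (Base + 6) would instead give [6, 10), which overlaps [8, 16), and the
// function returns false.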

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}
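// Worked example for the memset path above (illustrative): forwarding
// memset(p, 0xAB, 8) to a 32-bit load of p passes the size guard
// (Len * 8 == 64, which is not ult LoadSize == 32) and computes
// APInt::getSplat(32, 0xAB) == 0xABABABAB, i.e. the stored byte broadcast
// across the load width, returned as a ConstantInt. A 16-byte load would
// fail the guard (128 > 64) and no forwarding happens.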

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom,
    unsigned MaxInstsToScan, BatchAAResults *AA, bool *IsLoadCSE,
    unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to disambiguate against a store to the pointer.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore
        // the store. This is a simple form of alias analysis that is used by
        // the inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't, return from the load.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // the load and the value did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}
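//===----------------------------------------------------------------------===//
// Illustrative usage sketch -- not part of Loads.cpp. The overload above scans
// the whole block and defers alias queries until a candidate is found. A
// caller holding an AAResults might use it like this; the function name is
// hypothetical.
//===----------------------------------------------------------------------===//
static Value *findAvailableWithBatchAA(LoadInst *Load, AAResults &AA) {
  // BatchAAResults caches ModRef queries, and none are issued at all on the
  // common path where no candidate value is found.
  BatchAAResults BatchAA(AA);
  bool IsLoadCSE = false;
  return FindAvailableLoadedValue(Load, BatchAA, &IsLoadCSE);
}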

// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that
// only feeds into them.
static bool isPointerUseReplacable(const Use &U) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst, PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}

// Returns true if `To` is a null pointer, constant dereferenceable pointer or
// both pointers have the same underlying objects.
static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}

bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  assert(U->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!To->getType()->isPointerTy())
    return true;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;
  return isPointerUseReplacable(U);
}

bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPointerTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}

bool llvm::isDereferenceableReadOnlyLoop(
    Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
          return false;
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
        return false;
    }
  }
  return true;
}
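//===----------------------------------------------------------------------===//
// Illustrative usage sketch -- not part of Loads.cpp. A minimal sketch of a
// transform gating on the read-only-loop query above; the function name is
// hypothetical.
//===----------------------------------------------------------------------===//
static bool isSafeToPeelWithoutFaulting(Loop *L, ScalarEvolution &SE,
                                        DominatorTree &DT,
                                        AssumptionCache &AC) {
  SmallVector<const SCEVPredicate *, 4> Predicates;
  if (!isDereferenceableReadOnlyLoop(L, &SE, &DT, &AC, &Predicates))
    return false;
  // Any predicates collected must still be checked or versioned on by the
  // caller, since the trip-count bound used above may rely on them.
  return Predicates.empty();
}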
