LLVM: lib/Analysis/Loads.cpp Source File

1 //===- Loads.cpp - Local load analysis ------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines simple local analyses for load instructions.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/Analysis/Loads.h"
...
28 using namespace llvm;
29

30 static bool isAligned(const Value *Base, Align Alignment,
31                       const DataLayout &DL) {
32   return Base->getPointerAlignment(DL) >= Alignment;
33 }

34
35 static bool isDereferenceableAndAlignedPointerViaAssumption(
36     const Value *Ptr, Align Alignment,
37     function_ref<bool(const RetainedKnowledge &RK)> CheckSize,
38     const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC,
39     const DominatorTree *DT) {
40   if (!CtxI)
41     return false;
42
46   bool PtrCanBeFreed = Ptr->canBeFreed();
48   return getKnowledgeForValue(
49       Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
50       [&](RetainedKnowledge RK, Instruction *Assume, auto) {
51         if (!isValidAssumeForContext(Assume, CtxI, DT))
52           return false;
53         if (RK.AttrKind == Attribute::Alignment)
54           AlignRK = std::max(AlignRK, RK);
58         if ((!PtrCanBeFreed || willNotFreeBetween(Assume, CtxI)) &&
59             RK.AttrKind == Attribute::Dereferenceable)
60           DerefRK = std::max(DerefRK, RK);
61         IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
62         if (IsAligned && DerefRK && CheckSize(DerefRK))
63           return true;
64         return false;
66       });
67 }
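The query above is easiest to see from the producer side. Below is a minimal sketch (not part of this file) of emitting the kind of @llvm.assume call the helper consumes, via IRBuilder; the helper name and the byte counts 16/64 are illustrative assumptions.

// Sketch (assumed context): attach "align" and "dereferenceable" operand
// bundles to Ptr so getKnowledgeForValue() can later retrieve them as
// RetainedKnowledge.
#include "llvm/IR/IRBuilder.h"

void annotatePointer(llvm::IRBuilder<> &B, llvm::Value *Ptr) {
  using namespace llvm;
  // Emits: call void @llvm.assume(i1 true)
  //            ["align"(ptr %Ptr, i64 16), "dereferenceable"(ptr %Ptr, i64 64)]
  OperandBundleDef AlignBundle("align",
                               std::vector<Value *>{Ptr, B.getInt64(16)});
  OperandBundleDef DerefBundle("dereferenceable",
                               std::vector<Value *>{Ptr, B.getInt64(64)});
  B.CreateAssumption(B.getTrue(), {AlignBundle, DerefBundle});
}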

68 /// Test if V is always a pointer to allocated and suitably aligned memory
69 /// for a simple load or store.
70 static bool isDereferenceableAndAlignedPointer(
71     const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
72     const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
73     const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
75     unsigned MaxDepth) {
76   assert(V->getType()->isPointerTy() && "Base must be pointer");
77
78   // Recursion limit.
79   if (MaxDepth-- == 0)
80     return false;
81
82   // Already visited?  Bail out, we've likely hit unreachable code.
83   if (!Visited.insert(V).second)
84     return false;
85
86   // Note that it is not safe to speculate into a malloc'd region because
87   // malloc may return null.
89   // For GEPs, determine if the indexing lands within the allocated object.
90   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
91     const Value *Base = GEP->getPointerOperand();
92
93     APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
94     if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
95         !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
96              .isMinValue())
97       return false;
98
99     // If the base pointer is dereferenceable for Offset+Size bytes, then the
100     // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
101     // pointer is aligned to Alignment, then the GEP (== Base + Offset) is
102     // also aligned to Alignment, as Offset is a multiple of Alignment.
106     return isDereferenceableAndAlignedPointer(
107         Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
109         CtxI, AC, DT, TLI, Visited, MaxDepth);
110   }
111
112   // bitcasts of pointers are no-ops as far as dereferenceability goes.
113   if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
114     if (BC->getSrcTy()->isPointerTy())
115       return isDereferenceableAndAlignedPointer(
116           BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
117           Visited, MaxDepth);
118   }
119
120   if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
121     return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
123                                               Size, DL, CtxI, AC, DT, TLI,
124                                               Visited, MaxDepth) &&
125            isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
126                                               Size, DL, CtxI, AC, DT, TLI,
127                                               Visited, MaxDepth);
128   }
129
130   auto IsKnownDeref = [&]() {
131     bool CheckForNonNull, CheckForFreed;
132     if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
133                                                     CheckForFreed)) ||
134         CheckForFreed)
135       return false;
136     if (CheckForNonNull &&
137         !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
138       return false;
150     return true;
151   };
152   if (IsKnownDeref()) {
153     // As we recursed through GEPs to get here, we've incrementally checked
154     // that each step advanced by a multiple of the alignment. If our base is
155     // properly aligned, then the original offset accessed must also be.
156     return isAligned(V, Alignment, DL);
157   }
158
163   if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
164     return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
165                                               Alignment, Size, DL, CtxI,
166                                               AC, DT, TLI, Visited, MaxDepth);
167
176     ObjectSizeOpts Opts;
177     Opts.RoundToAlign = false;
178     Opts.NullIsUnknownSize = true;
181     uint64_t ObjSize;
182     if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
183       APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
185       if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
186           !V->canBeFreed()) {
187         // As above: each GEP step advanced by a multiple of the alignment,
188         // so if the base is properly aligned the accessed offset is too.
190         return isAligned(V, Alignment, DL);
192       }
193     }
194   }
195
197   if (const auto *Call = dyn_cast<CallBase>(V))
198     if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
199       return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI, AC, DT,
200                                                 TLI, Visited, MaxDepth);
201
202   if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
203     return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
204                                               Size, DL, CtxI, AC, DT, TLI,
205                                               Visited, MaxDepth);
206
207   return AC && isDereferenceableAndAlignedPointerViaAssumption(
208       V, Alignment,
209       [Size](const RetainedKnowledge &RK) {
210         return RK.ArgValue >= Size.getZExtValue();
211       },
212       DL, CtxI, AC, DT);
213 }

214

215 bool llvm::isDereferenceableAndAlignedPointer(
216     const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
217     const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
218     const TargetLibraryInfo *TLI) {
219   // Note: At the moment, Size can be zero.  This ends up being interpreted
220   // as a query of whether [Base, V] is dereferenceable and V is aligned
221   // (since that's what the implementation happened to do).  It's unclear if
222   // this is the desired semantic, but at least SelectionDAG does exercise
223   // this case.
224   SmallPtrSet<const Value *, 32> Visited;
225   return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
226                                               DT, TLI, Visited, 16);
227 }

228
229 bool llvm::isDereferenceableAndAlignedPointer(
230     const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
231     const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
232     const TargetLibraryInfo *TLI) {
233
234   // Unsized types and scalable vectors have no fixed store size to check.
235   if (!Ty->isSized() || Ty->isScalableTy())
236     return false;
237
243   APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
244                    DL.getTypeStoreSize(Ty));
245   return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
246                                             AC, DT, TLI);
247 }
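A minimal caller-side sketch (not from this listing) of the public query whose signature appears above; `canSpeculate` is a hypothetical wrapper, and the analyses are assumed to come from the usual pass infrastructure.

#include "llvm/Analysis/Loads.h"

bool canSpeculate(llvm::LoadInst *LI, llvm::AssumptionCache *AC,
                  llvm::DominatorTree *DT, llvm::TargetLibraryInfo *TLI) {
  const llvm::DataLayout &DL = LI->getDataLayout();
  // Ask whether the load's pointer is always dereferenceable and aligned
  // for the loaded type, at the load itself as context.
  return llvm::isDereferenceableAndAlignedPointer(
      LI->getPointerOperand(), LI->getType(), LI->getAlign(), DL,
      /*CtxI=*/LI, AC, DT, TLI);
}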

248

249 bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
250                                     const DataLayout &DL,
251                                     const Instruction *CtxI,
252                                     AssumptionCache *AC,
253                                     const DominatorTree *DT,
254                                     const TargetLibraryInfo *TLI) {
255   return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
256                                             TLI);
257 }
258
269 /// Test if A and B will obviously have the same value.
270 static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
271   // Test if the values are trivially equivalent.
272   if (A == B)
273     return true;
274
283         return true;
284
285   // Otherwise they may not be equivalent.
286   return false;
287 }

288

289 bool llvm::isDereferenceableAndAlignedInLoop(
290     LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
291     AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
292   const Align Alignment = LI->getAlign();
293   auto &DL = LI->getDataLayout();
294   Value *Ptr = LI->getPointerOperand();
295   APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
296                 DL.getTypeStoreSize(LI->getType()).getFixedValue());
297
298   // If given a uniform (i.e. non-varying) address, see if we can prove the
299   // access is safe within the loop w/o needing predication.
300   if (L->isLoopInvariant(Ptr))
301     return isDereferenceableAndAlignedPointer(
302         Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
303         &DT);
304
305   const SCEV *PtrScev = SE.getSCEV(Ptr);
306   auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);
307
308   // Check to see if we have a repeating access pattern and it's possible
309   // to prove all accesses are well aligned.
310   if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
311     return false;
312
313   auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
314   if (!Step)
315     return false;
316
317   // For the moment, restrict ourselves to the case where the access size is
318   // a multiple of the requested alignment and the base is aligned.
319   // TODO: generalize if a case found which warrants
320   if (EltSize.urem(Alignment.value()) != 0)
321     return false;
322
323   // TODO: Handle overlapping accesses.
324   if (EltSize.ugt(Step->getAPInt().abs()))
325     return false;
326
327   const SCEV *MaxBECount =
328       Predicates ? SE.getPredicatedSymbolicMaxBackedgeTakenCount(L, *Predicates)
329                  : SE.getSymbolicMaxBackedgeTakenCount(L);
330   const SCEV *BECount = Predicates
331                             ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
332                             : SE.getBackedgeTakenCount(L);
333   if (isa<SCEVCouldNotCompute>(MaxBECount))
334     return false;
335   std::optional<ScalarEvolution::LoopGuards> LoopGuards;
336   const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
337       L, PtrScev, LI->getType(), BECount, MaxBECount,
338       &SE, nullptr, &DT, AC, LoopGuards);
339   if (isa<SCEVCouldNotCompute>(AccessStart) ||
340       isa<SCEVCouldNotCompute>(AccessEnd))
341     return false;
342
343   // Try to get the access size.
344   const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
345   if (isa<SCEVCouldNotCompute>(PtrDiff))
346     return false;
347
348   if (!LoopGuards)
349     LoopGuards.emplace(
350         ScalarEvolution::LoopGuards::collect(L, SE));
351
352   APInt MaxPtrDiff =
353       SE.getUnsignedRangeMax(SE.applyLoopGuards(PtrDiff, *LoopGuards));
354
355   Value *Base = nullptr;
356   APInt AccessSize;
357   const SCEV *AccessSizeSCEV = nullptr;
358   if (auto *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
359     Base = NewBase->getValue();
360     AccessSize = MaxPtrDiff;
361     AccessSizeSCEV = PtrDiff;
362   } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
363     if (MinAdd->getNumOperands() != 2)
364       return false;
365
366     const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
367     const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
368     if (!Offset || !NewBase)
369       return false;
370
371     // The following code below assumes the offset is unsigned, but GEP
372     // offsets are treated as signed so we can end up with a signed value
373     // here too. For example, suppose the initial PHI value is (i8 255),
374     // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
375     if (Offset->getAPInt().isNegative())
376       return false;
377
378     // For the moment, restrict ourselves to the case where the offset is a
379     // multiple of the requested alignment and the base is aligned.
380     // TODO: generalize if a case found which warrants
381     if (Offset->getAPInt().urem(Alignment.value()) != 0)
382       return false;
383
384     bool Overflow = false;
385     AccessSize = MaxPtrDiff.uadd_ov(Offset->getAPInt(), Overflow);
386     if (Overflow)
387       return false;
388     AccessSizeSCEV = SE.getAddExpr(PtrDiff, Offset);
389     Base = NewBase->getValue();
390   } else
391     return false;
392
393   Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt();
394   if (BasicBlock *LoopPred = L->getLoopPredecessor()) {
396     CtxI = LoopPred->getTerminator();
397   }
398   return isDereferenceableAndAlignedPointerViaAssumption(
399              Base, Alignment,
400              [&SE, AccessSizeSCEV, &LoopGuards](const RetainedKnowledge &RK) {
405              },
406              DL, CtxI, AC, &DT) ||
407          isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
408                                             CtxI, AC, &DT);
409 }
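A usage sketch (assumed context, not from this file): this is the kind of query a vectorizer makes to decide whether the load can be executed unconditionally in every iteration; passing a Predicates vector lets SCEV record runtime predicates it relied on.

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ScalarEvolution.h"

bool safeInLoop(llvm::LoadInst *LI, llvm::Loop *L, llvm::ScalarEvolution &SE,
                llvm::DominatorTree &DT, llvm::AssumptionCache *AC) {
  llvm::SmallVector<const llvm::SCEVPredicate *, 4> Predicates;
  // True if every iteration's access is provably in-bounds and aligned,
  // possibly subject to the collected SCEV predicates.
  return llvm::isDereferenceableAndAlignedInLoop(LI, L, SE, DT, AC,
                                                 &Predicates);
}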

410

411 static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
412   const Function &F = *CtxI.getFunction();
413   // Speculative load may create a race that did not exist in the source.
414   return F.hasFnAttribute(Attribute::SanitizeThread) ||
415          // Speculative load may load data from dirty regions.
416          F.hasFnAttribute(Attribute::SanitizeAddress) ||
417          F.hasFnAttribute(Attribute::SanitizeHWAddress);
418 }

419
420 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
421   return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
422 }
423
435 bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
436                                        const APInt &Size, const DataLayout &DL,
437                                        Instruction *ScanFrom,
438                                        AssumptionCache *AC,
439                                        const DominatorTree *DT,
440                                        const TargetLibraryInfo *TLI) {
441   // If DT is not specified we can't make context-sensitive query
442   const Instruction* CtxI = DT ? ScanFrom : nullptr;
443   if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
444                                          TLI)) {
445     // With sanitizers `Dereferenceable` is not always enough for an
446     // unconditional load.
447     if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
448       return true;
449   }
450
451   if (!ScanFrom)
452     return false;
453
454   if (Size.getBitWidth() > 64)
455     return false;
456   const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());
457
458   // Otherwise, be a little bit aggressive by scanning the local block where
459   // we want to check to see if the pointer is already being loaded or stored
460   // from/to.  If so, the previous load or store would have already trapped,
461   // so there is no harm doing an extra load (also, CSE will later eliminate
462   // the load entirely).
463   BasicBlock::iterator BBI = ScanFrom->getIterator(),
464                        E = ScanFrom->getParent()->begin();
465
466   // We can at least always strip pointer casts even though we can't use the
467   // base here.
468   V = V->stripPointerCasts();
469
470   while (BBI != E) {
471     --BBI;
472
473     // If we see a free or a call which may write to memory (i.e. which might
474     // do a free) the pointer could be marked invalid.
475     if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
476         !isa<LifetimeIntrinsic>(BBI))
477       return false;
478
479     Value *AccessedPtr;
480     Type *AccessedTy;
481     Align AccessedAlign;
485     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
486       if (LI->isVolatile())
487         continue;
488       AccessedPtr = LI->getPointerOperand();
489       AccessedTy = LI->getType();
490       AccessedAlign = LI->getAlign();
492     } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
493       if (SI->isVolatile())
494         continue;
495       AccessedPtr = SI->getPointerOperand();
496       AccessedTy = SI->getValueOperand()->getType();
497       AccessedAlign = SI->getAlign();
498     } else
499       continue;
500
501     if (AccessedAlign < Alignment)
502       continue;
503
504     // Handle trivial cases.
505     if (AccessedPtr == V &&
506         TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
507       return true;
508
509     if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
510         TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
511       return true;
512   }
513   return false;
514 }
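A caller-side sketch (not from this listing): asking whether a 4-byte, 4-aligned access through a pointer can be emitted without a guard at a given program point. `canHoistWordLoad` and `InsertPt` are hypothetical names.

#include "llvm/Analysis/Loads.h"

bool canHoistWordLoad(llvm::Value *Ptr, llvm::Instruction *InsertPt,
                      const llvm::DataLayout &DL) {
  // A 4-byte access; the size is expressed as an APInt in index-type width.
  llvm::APInt Size(DL.getIndexTypeSizeInBits(Ptr->getType()), 4);
  return llvm::isSafeToLoadUnconditionally(Ptr, llvm::Align(4), Size, DL,
                                           InsertPt);
}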

515

516 bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
517                                        const DataLayout &DL,
518                                        Instruction *ScanFrom,
519                                        AssumptionCache *AC,
520                                        const DominatorTree *DT,
521                                        const TargetLibraryInfo *TLI) {
522   TypeSize TySize = DL.getTypeStoreSize(Ty);
523   if (TySize.isScalable())
524     return false;
525   APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
526   return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
527                                      TLI);
528 }

529
530 // DefMaxInstsToScan - the default number of maximum instructions
531 // to scan in the block, used by FindAvailableLoadedValue().
536 cl::opt<unsigned> llvm::DefMaxInstsToScan(
537     "available-load-scan-limit", cl::init(6), cl::Hidden,
538     cl::desc("Use this to specify the default maximum number of instructions "
539              "to scan backward from a given instruction, when searching for "
540              "available loaded value"));

541
542 Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
543                                       BasicBlock::iterator &ScanFrom,
544                                       unsigned MaxInstsToScan,
545                                       BatchAAResults *AA, bool *IsLoadCSE,
546                                       unsigned *NumScanedInst) {
547   // Don't CSE load that is volatile or anything stronger than unordered.
548   if (!Load->isUnordered())
549     return nullptr;
550
551   MemoryLocation Loc = MemoryLocation::get(Load);
552   return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
553                                    ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoadCSE,
554                                    NumScanedInst);
555 }
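A usage sketch (assumed context): the classic GVN/InstCombine-style call. ScanFrom is advanced by the callee, and DefMaxInstsToScan bounds the backward walk; `tryForward` is a hypothetical name.

#include "llvm/Analysis/Loads.h"

llvm::Value *tryForward(llvm::LoadInst *Load) {
  llvm::BasicBlock::iterator ScanFrom = Load->getIterator();
  bool IsLoadCSE = false;
  if (llvm::Value *V = llvm::FindAvailableLoadedValue(
          Load, Load->getParent(), ScanFrom, llvm::DefMaxInstsToScan,
          /*AA=*/nullptr, &IsLoadCSE))
    return V; // Forward the previously stored/loaded value instead of reloading.
  return nullptr;
}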

556
557 // Check if the load and the store have the same base, constant offsets and
558 // non-overlapping access ranges.
559 static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
560                                               Type *LoadTy,
561                                               const Value *StorePtr,
562                                               Type *StoreTy,
563                                               const DataLayout &DL) {
564   APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
565   APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
566   const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
567       DL, LoadOffset, /* AllowNonInbounds */ false);
568   const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
569       DL, StoreOffset, /* AllowNonInbounds */ false);
570   if (LoadBase != StoreBase)
571     return false;
572   auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
573   auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
574   ConstantRange LoadRange(LoadOffset,
575                           LoadOffset + LoadAccessSize.toRaw());
576   ConstantRange StoreRange(StoreOffset,
577                            StoreOffset + StoreAccessSize.toRaw());
578   return LoadRange.intersectWith(StoreRange).isEmptySet();
579 }
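A worked example of the range test above, with illustrative values: a 4-byte load at offset 0 and a 4-byte store at offset 4 off the same base occupy [0,4) and [4,8), whose intersection is empty, so they provably don't overlap.

#include "llvm/IR/ConstantRange.h"

bool disjointExample() {
  llvm::APInt LoadOff(64, 0), StoreOff(64, 4), Size(64, 4);
  llvm::ConstantRange LoadRange(LoadOff, LoadOff + Size);    // [0, 4)
  llvm::ConstantRange StoreRange(StoreOff, StoreOff + Size); // [4, 8)
  return LoadRange.intersectWith(StoreRange).isEmptySet();   // true
}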

580
581 static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
582                                     Type *AccessTy, bool AtLeastAtomic,
583                                     const DataLayout &DL, bool *IsLoadCSE) {
584   // If this is a load of Ptr, the loaded value is available.
585   // (This is true even if the load is volatile or atomic, although
586   // those cases are unlikely.)
587   if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
588     // We can value forward from an atomic to a non-atomic, but not the
589     // other way around.
590     if (LI->isAtomic() < AtLeastAtomic)
591       return nullptr;
592
593     Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
594     if (!AreEquivalentAddressValues(LoadPtr, Ptr))
595       return nullptr;
596
597     if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
598       if (IsLoadCSE)
599         *IsLoadCSE = true;
600       return LI;
601     }
602   }
603
604   // If this is a store through Ptr, the value is available!
605   // (This is true even if the store is volatile or atomic, although
606   // those cases are unlikely.)
607   if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
608     // We can value forward from an atomic to a non-atomic, but not the
609     // other way around.
610     if (SI->isAtomic() < AtLeastAtomic)
611       return nullptr;
612
613     Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
614     if (!AreEquivalentAddressValues(StorePtr, Ptr))
615       return nullptr;
616
617     if (IsLoadCSE)
618       *IsLoadCSE = false;
619
620     Value *Val = SI->getValueOperand();
621     if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
622       return Val;
623
624     TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
625     TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
629   }
630
631   if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
632     // Don't forward from (non-atomic) memset to atomic load.
633     if (AtLeastAtomic)
634       return nullptr;
635
636     // Only handle constant memsets.
637     auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
638     auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
639     if (!Val || !Len)
640       return nullptr;
641
642     // Handle offsets.
643     int64_t StoreOffset = 0, LoadOffset = 0;
644     const Value *StoreBase =
645         GetPointerBaseWithConstantOffset(MSI->getDest(), StoreOffset, DL);
646     const Value *LoadBase =
647         GetPointerBaseWithConstantOffset(Ptr, LoadOffset, DL);
648     if (StoreBase != LoadBase || LoadOffset < StoreOffset)
649       return nullptr;
650
651     if (IsLoadCSE)
652       *IsLoadCSE = false;
653
654     TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
655     if (LoadTypeSize.isScalable())
656       return nullptr;
657
658     // Make sure the read bytes are contained in the memset.
659     uint64_t LoadSize = LoadTypeSize.getFixedValue();
660     if ((Len->getValue() * 8).ult(LoadSize + (LoadOffset - StoreOffset) * 8))
661       return nullptr;
662
663     APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
664                                 : Val->getValue().trunc(LoadSize);
665     ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
666     if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
667       return SplatC;
668
669     return nullptr;
670   }
671
672   return nullptr;
673 }
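An illustration of the memset-forwarding splat above (values assumed): a memset of byte 0xAB forwarded to a 32-bit load yields the byte broadcast over the load width.

#include "llvm/ADT/APInt.h"

llvm::APInt splatExample() {
  llvm::APInt Byte(8, 0xAB);
  // Broadcast the 8-bit pattern to 32 bits: 0xABABABAB.
  return llvm::APInt::getSplat(32, Byte);
}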

674
675 Value *llvm::findAvailablePtrLoadStore(
676     const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
677     BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
678     BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
679   if (MaxInstsToScan == 0)
680     MaxInstsToScan = ~0U;
681
682   const DataLayout &DL = ScanBB->getDataLayout();
683   const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();
684
685   while (ScanFrom != ScanBB->begin()) {
686     // We must ignore debug info directives when counting (otherwise they
687     // would affect codegen).
688     Instruction *Inst = &*--ScanFrom;
689     if (Inst->isDebugOrPseudoInst())
690       continue;
691
692     // Restore ScanFrom to expected value in case next test succeeds
693     ScanFrom++;
694
695     if (NumScanedInst)
696       ++(*NumScanedInst);
697
698     // Don't scan huge blocks.
699     if (MaxInstsToScan-- == 0)
700       return nullptr;
701
702     --ScanFrom;
703
704     if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
705                                                  AtLeastAtomic, DL, IsLoadCSE))
706       return Available;
707
709     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
710       Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
711
712       // If both StrippedPtr and StorePtr reach all the way to an alloca or
713       // global and they are different, ignore the store. This is a trivial
714       // form of alias analysis that is important for reg2mem'd code.
715       if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
716           (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
717           StrippedPtr != StorePtr)
718         continue;
719
720       if (AA) {
725         if (areNonOverlapSameBaseLoadAndStore(
726                 Loc.Ptr, AccessTy, SI->getPointerOperand(),
727                 SI->getValueOperand()->getType(), DL))
728           continue;
729       } else {
733           continue;
734       }
735
736       // Otherwise the store may or may not alias the pointer, bail out.
737       ++ScanFrom;
738       return nullptr;
739     }
740
741     // If this is some other instruction that may clobber Ptr, bail out.
742     if (Inst->mayWriteToMemory()) {
745       if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
746         continue;
747
748       // May modify the pointer, bail out.
749       ++ScanFrom;
750       return nullptr;
751     }
752   }
753
754   // Got to the start of the block, we didn't find it, but are done for this
755   // block.
756   return nullptr;
757 }
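A sketch (assumed context) of this lower-level entry point, useful when forwarding to a location rather than hanging everything off a LoadInst directly; `availableAt` is a hypothetical name.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"

llvm::Value *availableAt(llvm::LoadInst *Load, llvm::BatchAAResults *AA) {
  llvm::MemoryLocation Loc = llvm::MemoryLocation::get(Load);
  llvm::BasicBlock::iterator ScanFrom = Load->getIterator();
  bool IsLoadCSE;
  unsigned NumScanned = 0;
  return llvm::findAvailablePtrLoadStore(
      Loc, Load->getType(), Load->isAtomic(), Load->getParent(), ScanFrom,
      llvm::DefMaxInstsToScan, AA, &IsLoadCSE, &NumScanned);
}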

758
759 Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
760                                       bool *IsLoadCSE,
761                                       unsigned MaxInstsToScan) {
762   const DataLayout &DL = Load->getDataLayout();
763   Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
764   BasicBlock *ScanBB = Load->getParent();
765   Type *AccessTy = Load->getType();
766   bool AtLeastAtomic = Load->isAtomic();
767
768   if (!Load->isUnordered())
769     return nullptr;
770
771   // Try to find an available value first, and delay expensive alias analysis
772   // queries until later.
773   Value *Available = nullptr;
774   SmallVector<Instruction *> MustNotAliasInsts;
775   for (Instruction &Inst : make_range(++Load->getReverseIterator(),
776                                       ScanBB->rend())) {
777     if (Inst.isDebugOrPseudoInst())
778       continue;
779
780     if (MaxInstsToScan-- == 0)
781       return nullptr;
782
783     Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
784                                       AtLeastAtomic, DL, IsLoadCSE);
785     if (Available)
786       break;
787
788     if (Inst.mayWriteToMemory())
789       MustNotAliasInsts.push_back(&Inst);
790   }
791
792   // If we found an available value, ensure that the instructions in between
793   // did not modify the memory location.
794   if (Available) {
795     MemoryLocation Loc = MemoryLocation::get(Load);
796     for (Instruction *Inst : MustNotAliasInsts)
797       if (isModSet(AA.getModRefInfo(Inst, Loc)))
798         return nullptr;
799   }
800
801   return Available;
802 }
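A sketch of the batched overload (assumed context): it defers AA queries until a candidate is found and then checks only the intervening writes, which is why it takes a BatchAAResults by reference; `tryForwardBatched` is a hypothetical name.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"

llvm::Value *tryForwardBatched(llvm::LoadInst *Load, llvm::AAResults &AA) {
  // One BatchAAResults amortizes AA queries across the whole scan.
  llvm::BatchAAResults BatchAA(AA);
  bool IsLoadCSE = false;
  return llvm::FindAvailableLoadedValue(Load, BatchAA, &IsLoadCSE);
}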

803
806 static bool isPointerUseReplacable(const Use &U, bool HasNonAddressBits) {
807   unsigned Limit = 40;
808   SmallVector<const User *> Worklist({U.getUser()});
809   SmallPtrSet<const User *, 8> Visited;
810
811   while (!Worklist.empty() && --Limit) {
812     auto *User = Worklist.pop_back_val();
813     if (!Visited.insert(User).second)
814       continue;
816       continue;
817
820       continue;
821     if (isa<PHINode, SelectInst>(User))
822       Worklist.append(User->user_begin(), User->user_end());
823     else
824       return false;
825   }
826
827   return Limit != 0;
828 }

829

830

831

834

835

837 return true;

840 return true;

843}

844
845 bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
846                                           const DataLayout &DL) {
847   Type *Ty = To->getType();
848   assert(U->getType() == Ty && "values must have matching types");
849
850   if (!Ty->isPointerTy())
851     return true;
852
855     return false;
856
857   if (isPointerAlwaysReplaceable(&*U, To, DL))
858     return true;
859
860   bool HasNonAddressBits =
861       DL.getAddressSizeInBits(Ty) != DL.getPointerTypeSizeInBits(Ty);
862   return isPointerUseReplacable(U, HasNonAddressBits);
863 }

864
865 bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
866                                      const DataLayout &DL) {
867   assert(From->getType() == To->getType() && "values must have matching types");
868   // Not a pointer, just return true.
869   if (!From->getType()->isPointerTy())
870     return true;
871
872   return isPointerAlwaysReplaceable(From, To, DL);
873 }
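A sketch (assumed context) of how a GVN-style pass would guard the replacement of one pointer by another that compared equal; `mayReplace` is a hypothetical name.

#include "llvm/Analysis/Loads.h"

bool mayReplace(const llvm::Value *From, const llvm::Value *To,
                const llvm::DataLayout &DL) {
  // Pointer equality does not imply full interchangeability (provenance,
  // non-address bits), so ask the analysis before rewriting uses.
  return llvm::canReplacePointersIfEqual(From, To, DL);
}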

874
875 bool llvm::isReadOnlyLoop(
876     Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
877     SmallVectorImpl<LoadInst *> &NonDereferenceableAndAlignedLoads,
878     SmallVectorImpl<const SCEVPredicate *> *Predicates) {
879   for (BasicBlock *BB : L->blocks()) {
880     for (Instruction &I : *BB) {
881       if (auto *LI = dyn_cast<LoadInst>(&I)) {
882         if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
883           NonDereferenceableAndAlignedLoads.push_back(LI);
884       } else if (I.mayReadFromMemory() || I.mayWriteToMemory() ||
885                  I.mayThrow()) {
886         return false;
887       }
888     }
889   }
890   return true;
891 }
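A usage sketch (assumed context): collect the loads that would still need predication in a loop that otherwise only reads memory and doesn't throw; `onlyReadsAndIsSafe` is a hypothetical name.

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ScalarEvolution.h"

bool onlyReadsAndIsSafe(llvm::Loop *L, llvm::ScalarEvolution &SE,
                        llvm::DominatorTree &DT, llvm::AssumptionCache *AC) {
  llvm::SmallVector<llvm::LoadInst *, 4> UnsafeLoads;
  // True only if the loop is read-only and every load is provably safe.
  return llvm::isReadOnlyLoop(L, &SE, &DT, AC, UnsafeLoads) &&
         UnsafeLoads.empty();
}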

892
893 LinearExpression llvm::decomposeLinearExpression(const DataLayout &DL,
894                                                  Value *Ptr) {
900   while (true) {
901     auto *GEP = dyn_cast<GEPOperator>(Expr.BasePtr);
902     if (!GEP || GEP->getSourceElementType()->isScalableTy())
903       return Expr;
904
905     Value *VarIndex = nullptr;
906     for (Value *Index : GEP->indices()) {
907       if (isa<ConstantInt>(Index))
908         continue;
909
910       // Allow at most one variable index across the whole GEP chain.
911       if (Expr.Index || VarIndex)
912         return Expr;
913       VarIndex = Index;
914     }
915
918       return Expr;
919
922     Expr.BasePtr = GEP->getPointerOperand();
923     for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
925          GTI != GTE; ++GTI) {
926       Value *Index = GTI.getOperand();
927       if (auto *ConstOffset = dyn_cast<ConstantInt>(Index)) {
928         if (ConstOffset->isZero())
929           continue;
930         if (StructType *STy = GTI.getStructTypeOrNull()) {
931           unsigned ElementIdx = ConstOffset->getZExtValue();
932           const StructLayout *SL = DL.getStructLayout(STy);
933           Expr.Offset += SL->getElementOffset(ElementIdx);
934           continue;
935         }
936
937         APInt IndexedSize(BitWidth, GTI.getSequentialElementStride(DL),
938                           /*isSigned=*/false,
939                           /*implicitTrunc=*/true);
940         Expr.Offset += ConstOffset->getValue() * IndexedSize;
941         continue;
942       }
943
944       // Variable scaled index.
945       assert(Expr.Index == nullptr && "Shouldn't have index yet");
946       Expr.Index = Index;
947       Expr.Scale = APInt(BitWidth, GTI.getSequentialElementStride(DL),
949                          /*isSigned=*/false, /*implicitTrunc=*/true);
950     }
951   }
952
953   return Expr;
954 }
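An illustration (assumed IR, not from this file): for %p2 = getelementptr inbounds i32, ptr %p, i64 %i, the decomposition is BasePtr = %p, Index = %i, Scale = 4, Offset = 0, i.e. %p2 == %p + %i * 4 + 0 in address arithmetic. The field names follow the LinearExpression description below ("BasePtr + Index * Scale + Offset").

#include "llvm/Analysis/Loads.h"

void inspectPointer(const llvm::DataLayout &DL, llvm::Value *Ptr) {
  llvm::LinearExpression Expr = llvm::decomposeLinearExpression(DL, Ptr);
  // Ptr == Expr.BasePtr + Expr.Index * Expr.Scale + Expr.Offset,
  // where Expr.Index may be null if no variable index was found.
  (void)Expr;
}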



static bool AreEquivalentAddressValues(const Value *A, const Value *B)

Test if A and B will obviously have the same value.

Definition Loads.cpp:270

static bool isPointerAlwaysReplaceable(const Value *From, const Value *To, const DataLayout &DL)

Definition Loads.cpp:832

static bool isPointerUseReplacable(const Use &U, bool HasNonAddressBits)

Definition Loads.cpp:806

static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr, Type *LoadTy, const Value *StorePtr, Type *StoreTy, const DataLayout &DL)

Definition Loads.cpp:559

static bool isDereferenceableAndAlignedPointerViaAssumption(const Value *Ptr, Align Alignment, function_ref< bool(const RetainedKnowledge &RK)> CheckSize, const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT)

Definition Loads.cpp:35

static Value * getAvailableLoadStore(Instruction *Inst, const Value *Ptr, Type *AccessTy, bool AtLeastAtomic, const DataLayout &DL, bool *IsLoadCSE)

Definition Loads.cpp:581

static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI)

Definition Loads.cpp:411

This file provides utility analysis objects describing memory locations.

Class for arbitrary precision integers.

bool ugt(const APInt &RHS) const

Unsigned greater than comparison.

LLVM_ABI APInt urem(const APInt &RHS) const

Unsigned remainder operation.

LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const

static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)

Return a value containing V broadcasted over NewLen bits.

bool getBoolValue() const

Convert APInt to a boolean value.

bool uge(const APInt &RHS) const

Unsigned greater or equal comparison.

A cache of @llvm.assume calls within a function.

LLVM Basic Block Representation.

iterator begin()

Instruction iterator methods.

LLVM_ABI const DataLayout & getDataLayout() const

Get the data layout of the module this basic block belongs to.

InstListType::iterator iterator

Instruction iterators...

This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...

static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)

Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.

@ ICMP_ULE

unsigned less or equal

This is the shared class of boolean and integer constants.

This class represents a range of values.

LLVM_ABI bool isEmptySet() const

Return true if this set contains no members.

LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const

Return the range that results from the intersection of this range with another range.

A parsed version of the target data layout string in and methods for querying it.

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

Represents calls to the gc.relocate intrinsic.

GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const

Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).

LLVM_ABI bool isDebugOrPseudoInst() const LLVM_READONLY

Return true if the instruction is a DbgInfoIntrinsic or PseudoProbeInst.

LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY

Return true if this instruction may modify memory.

LLVM_ABI const Function * getFunction() const

Return the function this instruction belongs to.

LLVM_ABI const DataLayout & getDataLayout() const

Get the data layout of the module this instruction belongs to.

An instruction for reading from memory.

Value * getPointerOperand()

Align getAlign() const

Return the alignment of the access that is being performed.

static LocationSize precise(uint64_t Value)

Represents a single loop in the control flow graph.

Representation for a specific memory location.

static LLVM_ABI MemoryLocation get(const LoadInst *LI)

Return a location with information about the memory reference by the given instruction.

This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...

This class represents an analyzed expression in the program.

static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)

Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...

The main scalar evolution driver.

LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)

If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...

LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)

Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...

LLVM_ABI const SCEV * getSCEV(Value *V)

Return a SCEV expression for the full generality of the specified expression.

LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)

Return LHS-RHS.

APInt getUnsignedRangeMax(const SCEV *S)

Determine the max of the unsigned range for a particular SCEV.

LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)

Try to apply information from loop guards for L to Expr.

LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)

Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...

LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)

Get a canonical add expression, or something simpler if possible.

LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)

Test if the given expression is known to satisfy the condition described by Pred, LHS,...

const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)

When successful, this returns a SCEV that is greater than or equal to (i.e.

This class represents the LLVM 'select' instruction.

A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

iterator insert(iterator I, T &&Elt)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...

TypeSize getElementOffset(unsigned Idx) const

Class to represent struct types.

Provides information about what library functions are available for the current target.

static constexpr TypeSize getFixed(ScalarTy ExactSize)

The instances of the Type class are immutable: once they are created, they are never changed.

bool isPointerTy() const

True if this is an instance of PointerType.

static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)

bool isIntegerTy() const

True if this is an instance of IntegerType.

A Use represents the edge between a Value definition and its users.

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

user_iterator user_begin()

LLVM_ABI Align getPointerAlignment(const DataLayout &DL) const

Returns an alignment of the pointer value.

LLVM_ABI bool canBeFreed() const

Return true if the memory object referred to by V can by freed in the scope for which the SSA value d...

LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const

Accumulate the constant offset this value has compared to a base pointer.

LLVM_ABI const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

LLVM_ABI LLVMContext & getContext() const

All values hold a context through their type.

constexpr ScalarTy getFixedValue() const

static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)

constexpr bool isScalable() const

Returns whether the quantity is scaled by a runtime quantity (vscale).

An efficient, type-erasing, non-owning reference to a callable.

const ParentTy * getParent() const

self_iterator getIterator()

Abstract Attribute helper functions.

@ C

The default llvm calling convention, compatible with C.

initializer< Ty > init(const Ty &Val)

This is an optimization pass for GlobalISel generic memory operations.

LLVM_ABI bool willNotFreeBetween(const Instruction *Assume, const Instruction *CtxI)

Returns true, if no instruction between Assume and CtxI may free memory and the function is marked as...

LLVM_ABI RetainedKnowledge getKnowledgeForValue(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, AssumptionCache &AC, function_ref< bool(RetainedKnowledge, Instruction *, const CallBase::BundleOpInfo *)> Filter=[](auto...) { return true;})

Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and it match...

LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)

Return true if it is valid to use the assumptions provided by an assume intrinsic,...

LLVM_ABI const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)

This function returns call pointer argument that is considered the same by aliasing rules.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

bool isAligned(Align Lhs, uint64_t SizeInBytes)

Checks that SizeInBytes is a multiple of the alignment.

LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)

Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.

Definition Loads.cpp:229

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)

Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.

LLVM_ABI Value * findAvailablePtrLoadStore(const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic, BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan, BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst)

Scan backwards to see if we have the value of the given pointer available locally within a small numb...

Definition Loads.cpp:675

LLVM_ABI bool mustSuppressSpeculation(const LoadInst &LI)

Return true if speculation of the given load must be suppressed to avoid ordering or interfering with...

Definition Loads.cpp:420

gep_type_iterator gep_type_end(const User *GEP)

LLVM_ABI Value * FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan=DefMaxInstsToScan, BatchAAResults *AA=nullptr, bool *IsLoadCSE=nullptr, unsigned *NumScanedInst=nullptr)

Scan backwards to see if we have the value of the given load available locally within a small number ...

Definition Loads.cpp:542

LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})

Compute the size of the object pointed by Ptr.

LLVM_ABI bool canReplacePointersInUseIfEqual(const Use &U, const Value *To, const DataLayout &DL)

Definition Loads.cpp:845

LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)

Returns true if a pointer value From can be replaced with another pointer value \To if they are deeme...

Definition Loads.cpp:865

bool isModSet(const ModRefInfo MRI)

LLVM_ABI LinearExpression decomposeLinearExpression(const DataLayout &DL, Value *Ptr)

Decompose a pointer into a linear expression.

Definition Loads.cpp:893

LLVM_ABI bool isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size, const DataLayout &DL, Instruction *ScanFrom, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)

Return true if we know that executing a load from this value cannot trap.

Definition Loads.cpp:435

generic_gep_type_iterator<> gep_type_iterator

LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)

Extract value of C at the given Offset reinterpreted as Ty.

LLVM_ABI cl::opt< unsigned > DefMaxInstsToScan

The default number of maximum instructions to scan in the block, used by FindAvailableLoadedValue().

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)

Return true if the given value is known to be non-zero when defined.

LLVM_ABI const Value * getUnderlyingObjectAggressive(const Value *V)

Like getUnderlyingObject(), but will try harder to find a single underlying object.

constexpr unsigned BitWidth

LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)

Return true if this is always a dereferenceable pointer.

Definition Loads.cpp:249

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

LLVM_ABI bool isReadOnlyLoop(Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, SmallVectorImpl< LoadInst * > &NonDereferenceableAndAlignedLoads, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)

Returns true if the loop contains read-only memory accesses and doesn't throw.

Definition Loads.cpp:875

gep_type_iterator gep_type_begin(const User *GEP)

LLVM_ABI bool isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT, AssumptionCache *AC=nullptr, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)

Return true if we can prove that the given load (which is assumed to be within the specified loop) wo...

Definition Loads.cpp:289

LLVM_ABI std::pair< const SCEV *, const SCEV * > getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC, const SCEV *MaxBTC, ScalarEvolution *SE, DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > *PointerBounds, DominatorTree *DT, AssumptionCache *AC, std::optional< ScalarEvolution::LoopGuards > &LoopGuards)

Calculate Start and End points of memory access using exact backedge taken count BTC if computable or...

This struct is a compact representation of a valid (non-zero power of two) alignment.

constexpr uint64_t value() const

This is a hole in the type system and should not be abused.

Linear expression BasePtr + Index * Scale + Offset.

Various options to control the behavior of getObjectSize.

bool NullIsUnknownSize

If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.

bool RoundToAlign

Whether to round the result up to the alignment of allocas, byval arguments, and global variables.

Represent one information held inside an operand bundle of an llvm.assume.

Attribute::AttrKind AttrKind