LLVM: lib/Analysis/LoopAccessAnalysis.cpp Source File

//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation for the loop memory dependence that was originally
// developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

// (Seven standard-library #include directives followed here; their
// angle-bracketed targets were lost in extraction.)

67

68using namespace llvm;

70

71#define DEBUG_TYPE "loop-accesses"

72

static cl::opt<unsigned, true> VectorizationFactor(
    "force-vector-width", cl::Hidden,
    cl::desc("Sets the SIMD width. Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true> VectorizationInterleave(
    "force-vector-interleave", cl::Hidden,
    cl::desc("Sets the vectorization interleave count. "
             "Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following:
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2];
/// A later pass can version the loop for the case Stride1 == Stride2 == 1.
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
bool VectorizerParams::HoistRuntimeChecks;

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
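// Usage sketch (an assumption for illustration, not part of this file): these
// flags are read by `opt`, e.g. when printing the analysis on a module:
//
//   opt -passes='print<access-info>' -max-dependences=200 \
//       -laa-speculate-unit-stride=false -disable-output input.ll
//
// The flag names and defaults come from the cl::opt declarations above.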

153

158

159

160

162 if (SI == PtrToStride.end())

163

164 return OrigSCEV;

165

166 const SCEV *StrideSCEV = SI->second;

167

168

169

170

assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");

172

177

178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV

179 << " by: " << *Expr << "\n");

180 return Expr;

181}
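// Illustration (assumed source loop, not from this file): given
//   for (i = 0; i < N; ++i) A[i * Stride] = 0;
// PtrToStride maps A's pointer to the symbolic Stride. The rewrite above adds
// the predicate Stride == 1 and returns the pointer SCEV with that predicate
// applied, so {A,+,Stride} effectively becomes the unit-stride {A,+,1}.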

182

185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),

189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {

191}

192

194 const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *MaxBECount,

196 DenseMap<std::pair<const SCEV *, Type *>,

197 std::pair<const SCEV *, const SCEV *>> *PointerBounds) {

198 std::pair<const SCEV *, const SCEV *> *PtrBoundsPair;

201 {{PtrExpr, AccessTy},

203 if (!Ins)

204 return Iter->second;

205 PtrBoundsPair = &Iter->second;

206 }

207

208 const SCEV *ScStart;

209 const SCEV *ScEnd;

210

212 ScStart = ScEnd = PtrExpr;

} else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {

214 ScStart = AR->getStart();

215 ScEnd = AR->evaluateAtIteration(MaxBECount, *SE);

216 const SCEV *Step = AR->getStepRecurrence(*SE);

217

218

219

if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {

221 if (CStep->getValue()->isNegative())

223 } else {

224

225

226

227 ScStart = SE->getUMinExpr(ScStart, ScEnd);

228 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);

229 }

230 } else

232

235

236

238 Type *IdxTy = DL.getIndexType(PtrExpr->getType());

240 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

241

242 std::pair<const SCEV *, const SCEV *> Res = {ScStart, ScEnd};

244 *PtrBoundsPair = Res;

245 return Res;

246}
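// Worked example (assumed values, for illustration): for the AddRec pointer
// {%A,+,4} with MaxBECount = 99 and a 4-byte access type, ScStart = %A and
// ScEnd = %A + 4 * 99 + 4, i.e. the first byte accessed and one past the last
// byte accessed over all iterations; negative steps swap Start and End.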

247

248

249

251 Type *AccessTy, bool WritePtr,

252 unsigned DepSetId, unsigned ASId,

254 bool NeedsFreeze) {

assert(!isa<SCEVCouldNotCompute>(ScStart) &&
       !isa<SCEVCouldNotCompute>(ScEnd) &&

260 "must be able to compute both start and end expressions");

261 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,

262 NeedsFreeze);

263}

264

265bool RuntimePointerChecking::tryToCreateDiffCheck(

267

268

269

271 return false;

272

275

276

277

280 return false;

281

286

287

288 if (AccSrc.size() != 1 || AccSink.size() != 1)

289 return false;

290

291

292 if (AccSink[0] < AccSrc[0])

294

auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);

297 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||

299 return false;

300

if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))

308 return false;

309

311 SinkAR->getLoop()->getHeader()->getDataLayout();

312 unsigned AllocSize =

313 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));

314

315

316

317

auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));

319 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||

320 Step->getAPInt().abs() != AllocSize)

321 return false;

322

326

327

328 if (Step->getValue()->isNegative())

330

331 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);

if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
    isa<SCEVCouldNotCompute>(SrcStartInt))

335 return false;

336

337 const Loop *InnerLoop = SrcAR->getLoop();

338

339

340

341

isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {

auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);

346 const Loop *StartARLoop = SrcStartAR->getLoop();

347 if (StartARLoop == SinkStartAR->getLoop() &&

349

350

351

352 SrcStartAR->getStepRecurrence(*SE) !=

353 SinkStartAR->getStepRecurrence(*SE)) {

354 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "

355 "cannot be hoisted out of the outer loop\n");

356 return false;

357 }

358 }

359

360 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"

361 << "SrcStart: " << *SrcStartInt << '\n'

362 << "SinkStartInt: " << *SinkStartInt << '\n');

363 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,

364 Src->NeedsFreeze || Sink->NeedsFreeze);

365 return true;

366}
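// Sketch of the resulting check (simplified assumption, not the verbatim
// expansion): a diff check tests only the distance between the two start
// addresses, roughly
//   (SinkStartInt - SrcStartInt) >= VF * IC * AllocSize
// at runtime, instead of comparing two full [Low, High) intervals.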

367

370

372 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {

375

377 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);

379 }

380 }

381 }

382 return Checks;

383}

384

385void RuntimePointerChecking::generateChecks(

387 assert(Checks.empty() && "Checks is not empty");

388 groupChecks(DepCands, UseDependencies);

390}

391

394 for (const auto &I : M.Members)

395 for (const auto &J : N.Members)

397 return true;

398 return false;

399}

400

401

402

406 if (!Diff)

407 return nullptr;

408 return Diff->isNegative() ? J : I;

409}

410

414 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,

415 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),

416 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);

417}

418

420 const SCEV *End, unsigned AS,

421 bool NeedsFreeze,

424 "all pointers in a checking group must be in the same address space");

425

426

427

428

430 if (!Min0)

431 return false;

432

434 if (!Min1)

435 return false;

436

437

438 if (Min0 == Start)

439 Low = Start;

440

441

442 if (Min1 != End)

444

447 return true;

448}

449

450void RuntimePointerChecking::groupChecks(

452

453

454

455

456

457

458

459

460

461

462

463

464

465

466

467

468

470

471

472

473

474

475

476

477

478

479

480

481

482

483

484

485

486

487

488

489

490

491

492

493

494

495

496 if (!UseDependencies) {

497 for (unsigned I = 0; I < Pointers.size(); ++I)

499 return;

500 }

501

502 unsigned TotalComparisons = 0;

503

505 for (unsigned Index = 0; Index < Pointers.size(); ++Index)

506 PositionMap[Pointers[Index].PointerValue].push_back(Index);

507

508

509

511

512

513

514

515 for (unsigned I = 0; I < Pointers.size(); ++I) {

516

517

519 continue;

520

523

526

527

528

529

530

531

533 MI != ME; ++MI) {

534 auto PointerI = PositionMap.find(MI->getPointer());

535 assert(PointerI != PositionMap.end() &&

536 "pointer in equivalence class not found in PositionMap");

537 for (unsigned Pointer : PointerI->second) {

538 bool Merged = false;

539

540 Seen.insert(Pointer);

541

542

543

545

546

547

548

550 break;

551

552 TotalComparisons++;

553

554 if (Group.addPointer(Pointer, *this)) {

555 Merged = true;

556 break;

557 }

558 }

559

560 if (!Merged)

561

562

563

564 Groups.emplace_back(Pointer, *this);

565 }

566 }

567

568

569

571 }

572}

573

576 unsigned PtrIdx2) {

577 return (PtrToPartition[PtrIdx1] != -1 &&

578 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);

579}

580

584

585

587 return false;

588

589

591 return false;

592

593

595}

596

599 unsigned Depth) const {

600 unsigned N = 0;

601 for (const auto &[Check1, Check2] : Checks) {

602 const auto &First = Check1->Members, &Second = Check2->Members;

603

605

606 OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";

607 for (unsigned K : First)

609

610 OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";

611 for (unsigned K : Second)

613 }

614}

615

617

620

623 OS.indent(Depth + 2) << "Group " << &CG << ":\n";

624 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High

625 << ")\n";

626 for (unsigned Member : CG.Members) {

628 }

629 }

630}

631

632namespace {

633

634

635

636

637

638class AccessAnalysis {

639public:

640

643

648 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),

649 LoopAliasScopes(LoopAliasScopes) {

650

651 BAA.enableCrossIterationMode();

652 }

653

654

655 void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {

657 AST.add(adjustLoc(Loc));

658 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);

659 if (IsReadOnly)

660 ReadOnlyPtr.insert(Ptr);

661 }

662

663

666 AST.add(adjustLoc(Loc));

667 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);

668 }

669

670

671

672

673

674

675

676

678 MemAccessInfo Access, Type *AccessTy,

681 Loop *TheLoop, unsigned &RunningDepId,

682 unsigned ASId, bool ShouldCheckStride, bool Assume);

683

684

685

686

687

688

691 Value *&UncomputablePtr, bool ShouldCheckWrap = false);

692

693

694

695 void buildDependenceSets() {

696 processMemAccesses();

697 }

698

699

700

701

702

703

704 bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }

705

706

708 CheckDeps.clear();

710 }

711

712 const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }

713

714private:

716

717

718

720

721

725 return Loc;

726 }

727

728

729 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {

730 if (!ScopeList)

731 return nullptr;

732

733

734

return LoopAliasScopes.contains(cast<MDNode>(Scope));

737 }))

738 return nullptr;

739

740 return ScopeList;

741 }

742

743

744

745 void processMemAccesses();

746

747

748

749 PtrAccessMap Accesses;

750

751

752 const Loop *TheLoop;

753

754

755 MemAccessInfoList CheckDeps;

756

757

759

760

762

763

764

766

767

769

770

771

772

774

775

776

777

778

779

780

781

782 bool IsRTCheckAnalysisNeeded = false;

783

784

786

788

789

790

792};

793

794}

795

796

797

798

800 const SCEV *PtrScev, Loop *L, bool Assume) {

801

803 return true;

804

const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

806

807 if (!AR && Assume)

809

810 if (!AR)

811 return false;

812

814}

815

816

819 Type *AccessTy, Loop *L, bool Assume) {

822 return true;

823

824 return getPtrStride(PSE, AccessTy, Ptr, L, Strides, Assume).has_value() ||

826}

827

833

834 while (!WorkList.empty()) {

837 continue;

auto *PN = dyn_cast<PHINode>(Ptr);

839

840

841

842 if (PN && InnermostLoop.contains(PN->getParent()) &&

843 PN->getParent() != InnermostLoop.getHeader()) {

844 for (const Use &Inc : PN->incoming_values())

846 } else

847 AddPointer(Ptr);

848 }

849}

850

851

852

853

854

855

856

857

858

859

860

861

862

863

864

865

866

867

871 unsigned Depth) {

872

873

874

875

if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
    !isa<Instruction>(Ptr) || Depth == 0) {

880 return;

881 }

882

884

886 return get<1>(S);

887 };

888

889 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {

890 switch (Opcode) {

891 case Instruction::Add:

893 case Instruction::Sub:

895 default:

896 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");

897 }

898 };

899

901 unsigned Opcode = I->getOpcode();

902 switch (Opcode) {

903 case Instruction::GetElementPtr: {

auto *GEP = cast<GetElementPtrInst>(I);

905 Type *SourceTy = GEP->getSourceElementType();

906

907

908 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {

910 break;

911 }

916

917

918 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||

919 any_of(OffsetScevs, UndefPoisonCheck);

920

921

922

923

924 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)

925 BaseScevs.push_back(BaseScevs[0]);

926 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)

927 OffsetScevs.push_back(OffsetScevs[0]);

928 else {

929 ScevList.emplace_back(Scev, NeedsFreeze);

930 break;

931 }

932

933

936

937

938

939

941

942

947 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),

948 NeedsFreeze);

949 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),

950 NeedsFreeze);

951 break;

952 }

953 case Instruction::Select: {

955

956

957

960 if (ChildScevs.size() == 2) {

961 ScevList.push_back(ChildScevs[0]);

962 ScevList.push_back(ChildScevs[1]);

963 } else

965 break;

966 }

967 case Instruction::PHI: {

969

970

971

972 if (I->getNumOperands() == 2) {

975 }

976 if (ChildScevs.size() == 2) {

977 ScevList.push_back(ChildScevs[0]);

978 ScevList.push_back(ChildScevs[1]);

979 } else

981 break;

982 }

983 case Instruction::Add:

984 case Instruction::Sub: {

989

990

991 bool NeedsFreeze =

992 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);

993

994

995

996

997 if (LScevs.size() == 2 && RScevs.size() == 1)

999 else if (RScevs.size() == 2 && LScevs.size() == 1)

1001 else {

1002 ScevList.emplace_back(Scev, NeedsFreeze);

1003 break;

1004 }

1005

1006 ScevList.emplace_back(

1007 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),

1008 NeedsFreeze);

1009 ScevList.emplace_back(

1010 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),

1011 NeedsFreeze);

1012 break;

1013 }

1014 default:

1015

1016 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");

1018 break;

1019 }

1020}
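// Illustrative input (assumed IR, not from this file): for
//   %p = select i1 %c, ptr %a, ptr %b
//   store i32 0, ptr %p
// the recursion above yields the two SCEVs of %a and %b, so callers can emit
// runtime checks that cover both possible targets of the forked pointer %p.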

1021

1025 const Loop *L) {

1030

1031

1032

1033 if (Scevs.size() == 2 &&

1034 (isa(get<0>(Scevs[0])) ||

1036 (isa(get<0>(Scevs[1])) ||

1038 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");

1039 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");

1040 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");

1041 return Scevs;

1042 }

1043

1045}

1046

1048 MemAccessInfo Access, Type *AccessTy,

1051 Loop *TheLoop, unsigned &RunningDepId,

1052 unsigned ASId, bool ShouldCheckWrap,

1053 bool Assume) {

1055

1058

1059 for (const auto &P : TranslatedPtrs) {

1060 const SCEV *PtrExpr = get<0>(P);

1062 return false;

1063

1064

1065

1066 if (ShouldCheckWrap) {

1067

1068 if (TranslatedPtrs.size() > 1)

1069 return false;

1070

if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop, Assume))

1072 return false;

1073 }

1074

1075

1076 if (TranslatedPtrs.size() == 1)

1078 false};

1079 }

1080

1081 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {

1082

1083 unsigned DepId;

1084

1085 if (isDependencyCheckNeeded()) {

1087 unsigned &LeaderId = DepSetId[Leader];

1088 if (!LeaderId)

1089 LeaderId = RunningDepId++;

1090 DepId = LeaderId;

1091 } else

1092

1093 DepId = RunningDepId++;

1094

1095 bool IsWrite = Access.getInt();

1096 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,

1097 NeedsFreeze);

1098 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');

1099 }

1100

1101 return true;

1102}

1103

1107 Value *&UncomputablePtr, bool ShouldCheckWrap) {

1108

1109

1110 bool CanDoRT = true;

1111

1112 bool MayNeedRTCheck = false;

1113 if (!IsRTCheckAnalysisNeeded) return true;

1114

1115 bool IsDepCheckNeeded = isDependencyCheckNeeded();

1116

1117

1118

1119 unsigned ASId = 0;

1120 for (const auto &AS : AST) {

1121 int NumReadPtrChecks = 0;

1122 int NumWritePtrChecks = 0;

1123 bool CanDoAliasSetRT = true;

1124 ++ASId;

1125 auto ASPointers = AS.getPointers();

1126

1127

1128

1129 unsigned RunningDepId = 1;

1131

1133

1134

1135

1137 for (const Value *ConstPtr : ASPointers) {

1139 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));

1140 if (IsWrite)

1141 ++NumWritePtrChecks;

1142 else

1143 ++NumReadPtrChecks;

1145 }

1146

1147

1148

1149 if (NumWritePtrChecks == 0 ||

1150 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {

1151 assert((ASPointers.size() <= 1 ||

1154 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),

1155 true);

1156 return DepCands.findValue(AccessWrite) == DepCands.end();

1157 })) &&

1158 "Can only skip updating CanDoRT below, if all entries in AS "

1159 "are reads or there is at most 1 entry");

1160 continue;

1161 }

1162

1163 for (auto &Access : AccessInfos) {

1164 for (const auto &AccessTy : Accesses[Access]) {

1165 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,

1166 DepSetId, TheLoop, RunningDepId, ASId,

1167 ShouldCheckWrap, false)) {

1168 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"

1169 << *Access.getPointer() << '\n');

1171 CanDoAliasSetRT = false;

1172 }

1173 }

1174 }

1175

1176

1177

1178

1179

1180

1181

1182

1183

1184

1185 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

1186

1187

1188

1189 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {

1190

1191

1192

1193 CanDoAliasSetRT = true;

1194 for (const auto &[Access, AccessTy] : Retries) {

1195 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,

1196 DepSetId, TheLoop, RunningDepId, ASId,

1197 ShouldCheckWrap, true)) {

1198 CanDoAliasSetRT = false;

1199 UncomputablePtr = Access.getPointer();

1200 break;

1201 }

1202 }

1203 }

1204

1205 CanDoRT &= CanDoAliasSetRT;

1206 MayNeedRTCheck |= NeedsAliasSetRTCheck;

1207 ++ASId;

1208 }

1209

1210

1211

1212

1213

1214

1215 unsigned NumPointers = RtCheck.Pointers.size();

1216 for (unsigned i = 0; i < NumPointers; ++i) {

1217 for (unsigned j = i + 1; j < NumPointers; ++j) {

1218

1219 if (RtCheck.Pointers[i].DependencySetId ==

1220 RtCheck.Pointers[j].DependencySetId)

1221 continue;

1222

1223 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)

1224 continue;

1225

1228

1231 if (ASi != ASj) {

1233 dbgs() << "LAA: Runtime check would require comparison between"

1234 " different address spaces\n");

1235 return false;

1236 }

1237 }

1238 }

1239

1240 if (MayNeedRTCheck && CanDoRT)

1242

1244 << " pointer comparisons.\n");

1245

1246

1247

1248

1250

1251 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;

1252 if (!CanDoRTIfNeeded)

1253 RtCheck.reset();

1254 return CanDoRTIfNeeded;

1255}

1256

1257void AccessAnalysis::processMemAccesses() {

1258

1259

1260

1261

1262 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");

1264 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");

1266 for (const auto &[A, _] : Accesses)

1267 dbgs() << "\t" << *A.getPointer() << " ("

1268 << (A.getInt() ? "write"

1269 : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"

1270 : "read"))

1271 << ")\n";

1272 });

1273

1274

1275

1276

1277

1278 for (const auto &AS : AST) {

1279

1280

1281

1282 auto ASPointers = AS.getPointers();

1283

1284 bool SetHasWrite = false;

1285

1286

1288 UnderlyingObjToAccessMap ObjToLastAccess;

1289

1290

1291 PtrAccessMap DeferredAccesses;

1292

1293

1294

1295 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {

1296 bool UseDeferred = SetIteration > 0;

1297 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;

1298

1299 for (const Value *ConstPtr : ASPointers) {

1301

1302

1303

1304 for (const auto &[AC, _] : S) {

1305 if (AC.getPointer() != Ptr)

1306 continue;

1307

1308 bool IsWrite = AC.getInt();

1309

1310

1311

1312 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;

1313 if (UseDeferred && !IsReadOnlyPtr)

1314 continue;

1315

1316

1317 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||

1318 S.count(MemAccessInfo(Ptr, false))) &&

1319 "Alias-set pointer not in the access set?");

1320

1321 MemAccessInfo Access(Ptr, IsWrite);

1323

1324

1325

1326

1327

1328

1329 if (!UseDeferred && IsReadOnlyPtr) {

1330

1331

1332 DeferredAccesses.insert({Access, {}});

1333 continue;

1334 }

1335

1336

1337

1338

1339

1340 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {

1341 CheckDeps.push_back(Access);

1342 IsRTCheckAnalysisNeeded = true;

1343 }

1344

1345 if (IsWrite)

1346 SetHasWrite = true;

1347

1348

1349

1351 ValueVector TempObjects;

1352

1353 UnderlyingObjects[Ptr] = {};

1357 << "Underlying objects for pointer " << *Ptr << "\n");

1358 for (const Value *UnderlyingObj : UOs) {

1359

1360

if (isa<ConstantPointerNull>(UnderlyingObj) &&

1365 continue;

1366

1367 UnderlyingObjToAccessMap::iterator Prev =

1368 ObjToLastAccess.find(UnderlyingObj);

1369 if (Prev != ObjToLastAccess.end())

1371

1372 ObjToLastAccess[UnderlyingObj] = Access;

1373 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");

1374 }

1375 }

1376 }

1377 }

1378 }

1379}

1380

1381

1382

1385

1386

1388 return true;

1389

1391 return true;

1392

1393

1394

1395

1396

1397

1398

1399

1400

const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
if (!GEP || !GEP->hasNoUnsignedSignedWrap())
  return false;

1404

1405

1406 Value *NonConstIndex = nullptr;

1407 for (Value *Index : GEP->indices())

1408 if (!isa(Index)) {

1409 if (NonConstIndex)

1410 return false;

1411 NonConstIndex = Index;

1412 }

1413 if (!NonConstIndex)

1414

1415 return false;

1416

1417

1418

if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))

1420 if (OBO->hasNoSignedWrap() &&

1421

1422

1423 isa(OBO->getOperand(1))) {

1424 const SCEV *OpScev = PSE.getSCEV(OBO->getOperand(0));

1425

if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))

1427 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);

1428 }

1429

1430 return false;

1431}

1432

1433

1434std::optional<int64_t>

1436 const Loop *Lp,

1438 bool Assume, bool ShouldCheckWrap) {

1441 return 0;

1442

1443 Type *Ty = Ptr->getType();

if (isa<ScalableVectorType>(AccessTy)) {

1446 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy

1447 << "\n");

1448 return std::nullopt;

1449 }

1450

const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

1452 if (Assume && !AR)

1454

1455 if (!AR) {

1456 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr

1457 << " SCEV: " << *PtrScev << "\n");

1458 return std::nullopt;

1459 }

1460

1461

1462 if (Lp != AR->getLoop()) {

1463 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "

1464 << *Ptr << " SCEV: " << *AR << "\n");

1465 return std::nullopt;

1466 }

1467

1468

1470

1471

const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
if (!C) {

1474 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr

1475 << " SCEV: " << *AR << "\n");

1476 return std::nullopt;

1477 }

1478

1480 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);

1482 const APInt &APStepVal = C->getAPInt();

1483

1484

1486 return std::nullopt;

1487

1489

1490

1491 int64_t Stride = StepVal / Size;

1492 int64_t Rem = StepVal % Size;

1493 if (Rem)

1494 return std::nullopt;

1495

1496 if (!ShouldCheckWrap)

1497 return Stride;

1498

1499

1500

1502 return Stride;

1503

1504

1505

1506

1507

1508

if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);

1510 GEP && GEP->hasNoUnsignedSignedWrap())

1511 return Stride;

1512

1513

1514

1515

1518 (Stride == 1 || Stride == -1))

1519 return Stride;

1520

1521 if (Assume) {

1524 << "LAA: Pointer: " << *Ptr << "\n"

1525 << "LAA: SCEV: " << *AR << "\n"

1526 << "LAA: Added an overflow assumption\n");

1527 return Stride;

1528 }

1530 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "

1531 << *Ptr << " SCEV: " << *AR << "\n");

1532 return std::nullopt;

1533}
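// Worked example (assumed values): for an i32 access with pointer SCEV
// {%A,+,8}, AllocSize = 4 gives StepVal = 8, Stride = 8 / 4 = 2 and Rem = 0,
// so the function returns a stride of 2 elements, subject to the wrap checks
// above; a step of 6 bytes would give Rem != 0 and std::nullopt.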

1534

1540 assert(PtrA && PtrB && "Expected non-nullptr pointers.");

1541

1542

1543 if (PtrA == PtrB)

1544 return 0;

1545

1546

1547 if (CheckType && ElemTyA != ElemTyB)

1548 return std::nullopt;

1549

1552

1553

1554 if (ASA != ASB)

1555 return std::nullopt;

1556 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

1557

1558 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);

1560 DL, OffsetA, true);

1562 DL, OffsetB, true);

1563

1564 int Val;

1565 if (PtrA1 == PtrB1) {

1566

1567

1568 ASA = cast(PtrA1->getType())->getAddressSpace();

1569 ASB = cast(PtrB1->getType())->getAddressSpace();

1570

1571 if (ASA != ASB)

1572 return std::nullopt;

1573

1574 IdxWidth = DL.getIndexSizeInBits(ASA);

1575 OffsetA = OffsetA.sextOrTrunc(IdxWidth);

1576 OffsetB = OffsetB.sextOrTrunc(IdxWidth);

1577

1578 OffsetB -= OffsetA;

1580 } else {

1581

1582 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);

1583 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);

std::optional<APInt> Diff =

1586 if (!Diff)

1587 return std::nullopt;

1588 Val = Diff->getSExtValue();

1589 }

1590 int Size = DL.getTypeStoreSize(ElemTyA);

1591 int Dist = Val / Size;

1592

1593

1594

1595 if (!StrictCheck || Dist * Size == Val)

1596 return Dist;

1597 return std::nullopt;

1598}
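// Worked example (assumed values): with ElemTy = i32 and PtrB = PtrA + 12
// bytes off the same stripped base, Val = 12 and Size = 4, so the element
// distance returned is 3. With StrictCheck and Val = 10, 2 * 4 != 10, so the
// result is std::nullopt.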

1599

1604 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&

1605 "Expected list of pointer operands.");

1606

1607

1608 Value *Ptr0 = VL[0];

1609

1610 using DistOrdPair = std::pair<int64_t, int>;

1612 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);

1613 Offsets.emplace(0, 0);

1614 bool IsConsecutive = true;

std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
                                          /*StrictCheck=*/true);

1618 if (!Diff)

1619 return false;

1620

1621

1622 int64_t Offset = *Diff;

1623 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);

1624 if (!IsInserted)

1625 return false;

1626

1627 IsConsecutive &= std::next(It) == Offsets.end();

1628 }

1629 SortedIndices.clear();

1630 if (!IsConsecutive) {

1631

1634 SortedIndices[Idx] = Off.second;

1635 }

1636 return true;

1637}
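// Illustrative example (assumed input): for VL = {A+8, A, A+4} with i32
// elements, the offsets relative to VL[0] are {0, -2, -1}, so the sorted
// order is {A, A+4, A+8} and SortedIndices becomes {1, 2, 0}; for input that
// is already consecutive, SortedIndices is left empty.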

1638

1639

1644 if (!PtrA || !PtrB)

1645 return false;

std::optional<int> Diff =
    getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
                    /*StrictCheck=*/true, CheckType);

1651 return Diff && *Diff == 1;

1652}

1653

1655 visitPointers(SI->getPointerOperand(), *InnermostLoop,

1657 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);

1658 InstMap.push_back(SI);

1659 ++AccessIdx;

1660 });

1661}

1662

1666 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);

1667 InstMap.push_back(LI);

1668 ++AccessIdx;

1669 });

1670}

1671

1674 switch (Type) {

1679

1687 }

1689}

1690

1692 switch (Type) {

1693 case NoDep:

1694 case Forward:

1695 case ForwardButPreventsForwarding:

1697 case IndirectUnsafe:

1698 return false;

1699

1700 case BackwardVectorizable:

1701 case Backward:

1702 case BackwardVectorizableButPreventsForwarding:

1703 return true;

1704 }

1706}

1707

1709 return isBackward() || Type == Unknown || Type == IndirectUnsafe;

1710}

1711

1713 switch (Type) {

1714 case Forward:

1715 case ForwardButPreventsForwarding:

1716 return true;

1717

1718 case NoDep:

1720 case BackwardVectorizable:

1721 case Backward:

1722 case BackwardVectorizableButPreventsForwarding:

1723 case IndirectUnsafe:

1724 return false;

1725 }

1727}

1728

1729bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,

1731

1732

1733

1734

1735

1736

1737

1738

1739

1740

1741

1742

1743 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;

1744

1745 uint64_t MaxVFWithoutSLForwardIssues = std::min(

1747

1748

1749 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;

1750 VF *= 2) {

1751

1752

1753 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {

1754 MaxVFWithoutSLForwardIssues = (VF >> 1);

1755 break;

1756 }

1757 }

1758

1759 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {

1761 dbgs() << "LAA: Distance " << Distance

1762 << " that could cause a store-load forwarding conflict\n");

1763 return true;

1764 }

1765

1766 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&

1767 MaxVFWithoutSLForwardIssues !=

1769 MinDepDistBytes = MaxVFWithoutSLForwardIssues;

1770 return false;

1771}
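// Worked example (assumed values): Distance = 24 bytes, TypeByteSize = 4, so
// NumItersForStoreLoadThroughMemory = 32. VF = 8 bytes: 24 % 8 == 0, safe.
// VF = 16 bytes: 24 % 16 != 0 and 24 / 16 < 32, so forwarding could fail and
// MaxVFWithoutSLForwardIssues is clamped to 8 bytes (two i32 lanes).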

1772

1776}

1777

1778

1779

1780

1781

1782

1783

1784

1785

1786

1787

1788

1789

1791 const SCEV &MaxBTC, const SCEV &Dist,

1794

1795

1796

1797

1798

1799

1800

1801

1802

1803

1804

1805

1806

1807

1808

1809

1810

1811

1812 const uint64_t ByteStride = MaxStride * TypeByteSize;

1815

1816 const SCEV *CastedDist = &Dist;

1817 const SCEV *CastedProduct = Product;

1818 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());

1819 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());

1820

1821

1822

1823

1824 if (DistTypeSizeBits > ProductTypeSizeBits)

1826 else

1828

1829

1830

1833 return true;

1834

1835

1836

1840}

1841

1842

1843

1844

1845

1846

1849 assert(Stride > 1 && "The stride must be greater than 1");

1850 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");

1851 assert(Distance > 0 && "The distance must be non-zero");

1852

1853

1854 if (Distance % TypeByteSize)

1855 return false;

1856

1857 uint64_t ScaledDist = Distance / TypeByteSize;

1858

1859

1860

1861

1862

1863

1864

1865

1866

1867

1868

1869

1870

1871

1872

1873

1874

1875 return ScaledDist % Stride;

1876}
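// Illustrative example (assumed loop): in
//   for (i = 0; i < 1024; i += 4) A[i + 2] = A[i] + 1;
// Distance = 8 bytes, Stride = 4, TypeByteSize = 4, so ScaledDist = 2 and
// 2 % 4 != 0: the read and write streams interleave without overlapping, and
// the accesses are reported independent.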

1877

1879 MemoryDepChecker::DepDistanceStrideAndSizeInfo>

1880MemoryDepChecker::getDependenceDistanceStrideAndSize(

1884 auto &SE = *PSE.getSE();

1885 const auto &[APtr, AIsWrite] = A;

1886 const auto &[BPtr, BIsWrite] = B;

1887

1888

1889 if (!AIsWrite && !BIsWrite)

1891

1894

1895

1896 if (APtr->getType()->getPointerAddressSpace() !=

1897 BPtr->getType()->getPointerAddressSpace())

1899

1900 std::optional<int64_t> StrideAPtr =

1901 getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);

1902 std::optional<int64_t> StrideBPtr =

1903 getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true, true);

1904

1907

1908

1909

1910

1911 if (StrideAPtr && *StrideAPtr < 0) {

1915 std::swap(StrideAPtr, StrideBPtr);

1916 }

1917

1919

1920 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink

1921 << "\n");

1922 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst

1923 << ": " << *Dist << "\n");

1924

1925

1926

1927

1928

if (!isa<SCEVCouldNotCompute>(SrcStart_) &&
    !isa<SCEVCouldNotCompute>(SrcEnd_) &&
    !isa<SCEVCouldNotCompute>(SinkStart_) &&
    !isa<SCEVCouldNotCompute>(SinkEnd_)) {

1940 if (!LoopGuards)

1941 LoopGuards.emplace(

1944 auto SinkStart = SE.applyLoopGuards(SinkStart_, *LoopGuards);

1947

1948 auto SinkEnd = SE.applyLoopGuards(SinkEnd_, *LoopGuards);

1949 auto SrcStart = SE.applyLoopGuards(SrcStart_, *LoopGuards);

1952 }

1953 }

1954

1955

1956

1957

1958

1959

1960

1961

1962 if (!StrideAPtr || !StrideBPtr) {

1963 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");

1965 }

1966

1967 int64_t StrideAPtrInt = *StrideAPtr;

1968 int64_t StrideBPtrInt = *StrideBPtr;

1969 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt

1970 << " Sink induction step: " << StrideBPtrInt << "\n");

1971

1972

1973 if (!StrideAPtrInt || !StrideBPtrInt)

1975

1976

1977

1978 if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {

1980 dbgs() << "Pointer access with strides in different directions\n");

1982 }

1983

1984 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);

1985 bool HasSameSize =

1986 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);

1987 if (!HasSameSize)

1988 TypeByteSize = 0;

1989

1990 StrideAPtrInt = std::abs(StrideAPtrInt);

1991 StrideBPtrInt = std::abs(StrideBPtrInt);

1992

1993 uint64_t MaxStride = std::max(StrideAPtrInt, StrideBPtrInt);

1994

1995 std::optional<uint64_t> CommonStride;

1996 if (StrideAPtrInt == StrideBPtrInt)

1997 CommonStride = StrideAPtrInt;

1998

1999

2000

2001

2002 bool ShouldRetryWithRuntimeCheck = CommonStride.has_value();

2003

2004 return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,

2005 ShouldRetryWithRuntimeCheck, TypeByteSize,

2006 AIsWrite, BIsWrite);

2007}

2008

2010MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,

2012 assert(AIdx < BIdx && "Must pass arguments in program order");

2013

2014

2015

2016 auto Res =

2017 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);

if (std::holds_alternative<Dependence::DepType>(Res))
  return std::get<Dependence::DepType>(Res);

2020

2021 auto &[Dist, MaxStride, CommonStride, ShouldRetryWithRuntimeCheck,

2022 TypeByteSize, AIsWrite, BIsWrite] =

std::get<DepDistanceStrideAndSizeInfo>(Res);

2024 bool HasSameSize = TypeByteSize > 0;

2025

if (isa<SCEVCouldNotCompute>(Dist)) {

2027

2028

2029 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;

2030 LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");

2032 }

2033

2036

2037

2038

2039

2040

2041

2044 *Dist, MaxStride, TypeByteSize))

2046

const SCEVConstant *ConstDist = dyn_cast<SCEVConstant>(Dist);

2048

2049

2050 if (ConstDist) {

2052

2053

2054

2055 if (Distance > 0 && CommonStride && CommonStride > 1 && HasSameSize &&

2057 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");

2059 }

2060 } else {

2061 if (!LoopGuards)

2062 LoopGuards.emplace(

2065 }

2066

2067

2070 if (HasSameSize) {

2071

2073 }

2074 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "

2075 "different type sizes\n");

2077 }

2078

2079 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);

2080

2081

2082

2083

2084

2085

2086

2087

2089 if (!ConstDist) {

2090

2091

2092

2093

2094 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;

2096 }

2097 if (!HasSameSize ||

2098 couldPreventStoreLoadForward(

2101 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");

2103 }

2104 }

2105

2106 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");

2108 }

2109

2111

2112 if (MinDistance <= 0) {

2113 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;

2115 }

2116

2117 if (!ConstDist) {

2118

2119

2120

2121

2122

2123

2124

2125

2126 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;

2127 }

2128

2129 if (!HasSameSize) {

2130 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "

2131 "different type sizes\n");

2133 }

2134

2135 if (!CommonStride)

2137

2138

2143

2144 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

2145

2146

2147

2148

2149

2150

2151

2152

2153

2154

2155

2156

2157

2158

2159

2160

2161

2162

2163

2164

2165

2166

2167

2168

2169

2170

2171

2172

2173

2174

2175

2176 uint64_t MinDistanceNeeded =

2177 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;

2178 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {

2179 if (!ConstDist) {

2180

2181

2182

2183

2185 }

2186 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "

2187 << MinDistance << '\n');

2189 }

2190

2191

2192

2193 if (MinDistanceNeeded > MinDepDistBytes) {

2194 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "

2195 << MinDistanceNeeded << " size in bytes\n");

2197 }

2198

2199

2200

2201

2202

2203

2204

2205

2206

2207

2208

2209

2210

2211

2212

2213

2214

2215 MinDepDistBytes =

2216 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);

2217

2218 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);

2219 uint64_t MinDepDistBytesOld = MinDepDistBytes;

2221 couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {

2222

2223

2224 assert(MinDepDistBytes == MinDepDistBytesOld &&

2225 "An update to MinDepDistBytes requires an update to "

2226 "MaxSafeVectorWidthInBits");

2227 (void)MinDepDistBytesOld;

2229 }

2230

2231

2232

2233 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);

2234 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance

2235 << " with max VF = " << MaxVF << '\n');

2236

2237 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;

2238 if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {

2239

2240

2241

2243 }

2244

2245 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);

2247}

2248

2251

2252 MinDepDistBytes = -1;

2255 if (Visited.count(CurAccess))

2256 continue;

2257

2258

2261

2262

2267

2268

2269 while (AI != AE) {

2270 Visited.insert(*AI);

2271 bool AIIsWrite = AI->getInt();

2272

2273

2275 (AIIsWrite ? AI : std::next(AI));

2276 while (OI != AE) {

2277

for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),

2279 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)

2280

2281

for (std::vector<unsigned>::iterator

2283 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),

2284 I2E = (OI == AI ? I1E : Accesses[*OI].end());

2285 I2 != I2E; ++I2) {

2286 auto A = std::make_pair(&*AI, *I1);

2287 auto B = std::make_pair(&*OI, *I2);

2288

2290 if (*I1 > *I2)

2292

2294 isDependent(*A.first, A.second, *B.first, B.second);

2296

2297

2298

2299

2300

2301 if (RecordDependences) {

2303 Dependences.emplace_back(A.second, B.second, Type);

2304

2306 RecordDependences = false;

2307 Dependences.clear();

2309 << "Too many dependences, stopped recording\n");

2310 }

2311 }

2313 return false;

2314 }

2315 ++OI;

2316 }

2317 ++AI;

2318 }

2319 }

2320

2321 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");

2323}

2324

2328 auto &IndexVector = Accesses.find(Access)->second;

2329

2332 std::back_inserter(Insts),

2333 [&](unsigned Idx) { return this->InstMap[Idx]; });

2334 return Insts;

2335}

2336

2338 "NoDep",

2339 "Unknown",

2340 "IndirectUnsafe",

2341 "Forward",

2342 "ForwardButPreventsForwarding",

2343 "Backward",

2344 "BackwardVectorizable",

2345 "BackwardVectorizableButPreventsForwarding"};

2346

2351 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";

2352 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";

2353}

2354

2355bool LoopAccessInfo::canAnalyzeLoop() {

2356

2359 << TheLoop->getLocStr() << "\n");

2360

2361

2363 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");

2364 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";

2365 return false;

2366 }

2367

2368

2371 dbgs() << "LAA: loop control flow is not understood by analyzer\n");

2372 recordAnalysis("CFGNotUnderstood")

2373 << "loop control flow is not understood by analyzer";

2374 return false;

2375 }

2376

2377

2378

2379

if (isa<SCEVCouldNotCompute>(ExitCount)) {

2382 recordAnalysis("CantComputeNumberOfIterations")

2383 << "could not determine number of loop iterations";

2384 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");

2385 return false;

2386 }

2387

2388 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "

2390 return true;

2391}

2392

2393bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,

2396

2400

2401

2402 unsigned NumReads = 0;

2403 unsigned NumReadWrites = 0;

2404

2405 bool HasComplexMemInst = false;

2406

2407

2408 HasConvergentOp = false;

2409

2410 PtrRtChecking->Pointers.clear();

2411 PtrRtChecking->Need = false;

2412

2414

2415 const bool EnableMemAccessVersioningOfLoop =

2418

2419

2420

2422 RPOT.perform(LI);

2424

2425

if (auto *Call = dyn_cast<CallBase>(&I)) {

2428 if (Call->isConvergent())

2429 HasConvergentOp = true;

2430 }

2431

2432

2433

2434 if (HasComplexMemInst && HasConvergentOp)

2435 return false;

2436

2437

2438 if (HasComplexMemInst)

2439 continue;

2440

2441

if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))

2443 for (Metadata *Op : Decl->getScopeList()->operands())

2444 LoopAliasScopes.insert(cast(Op));

2445

2446

2447

2448

auto *Call = dyn_cast<CallInst>(&I);

2451 continue;

2452

2453

2454

2455

2456 if (I.mayReadFromMemory()) {

2457 auto hasPointerArgs = [](CallBase *CB) {

2458 return any_of(CB->args(), [](Value const *Arg) {

2459 return Arg->getType()->isPointerTy();

2460 });

2461 };

2462

2463

2464

2465

2466 if (Call && Call->isNoBuiltin() && Call->getCalledFunction() &&

2468 continue;

2469

auto *Ld = dyn_cast<LoadInst>(&I);

2471 if (!Ld) {

2472 recordAnalysis("CantVectorizeInstruction", Ld)

2473 << "instruction cannot be vectorized";

2474 HasComplexMemInst = true;

2475 continue;

2476 }

2477 if (!Ld->isSimple() && !IsAnnotatedParallel) {

2478 recordAnalysis("NonSimpleLoad", Ld)

2479 << "read with atomic ordering or volatile read";

2480 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");

2481 HasComplexMemInst = true;

2482 continue;

2483 }

2484 NumLoads++;

2487 if (EnableMemAccessVersioningOfLoop)

2488 collectStridedAccess(Ld);

2489 continue;

2490 }

2491

2492

2493 if (I.mayWriteToMemory()) {

auto *St = dyn_cast<StoreInst>(&I);

2495 if (!St) {

2496 recordAnalysis("CantVectorizeInstruction", St)

2497 << "instruction cannot be vectorized";

2498 HasComplexMemInst = true;

2499 continue;

2500 }

2501 if (!St->isSimple() && !IsAnnotatedParallel) {

2502 recordAnalysis("NonSimpleStore", St)

2503 << "write with atomic ordering or volatile write";

2504 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");

2505 HasComplexMemInst = true;

2506 continue;

2507 }

2508 NumStores++;

2511 if (EnableMemAccessVersioningOfLoop)

2512 collectStridedAccess(St);

2513 }

2514 }

2515 }

2516

2517 if (HasComplexMemInst)

2518 return false;

2519

2520

2521

2522

2523

2524

2525 if (!Stores.size()) {

2526 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");

2527 return true;

2528 }

2529

2531 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,

2532 LoopAliasScopes);

2533

2534

2535

2536

2537

2538

2540

2541

2542

2544

2546 Value *Ptr = ST->getPointerOperand();

2547

2548 if (isInvariant(Ptr)) {

2549

2550 StoresToInvariantAddresses.push_back(ST);

2551 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=

2552 !UniformStores.insert(Ptr).second;

2553 }

2554

2555

2556

2558 if (Seen.insert({Ptr, AccessTy}).second) {

2559 ++NumReadWrites;

2560

2562

2563

2564

2565 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))

2567

2569 [&Accesses, AccessTy, Loc](Value *Ptr) {

2570 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);

2571 Accesses.addStore(NewLoc, AccessTy);

2572 });

2573 }

2574 }

2575

2576 if (IsAnnotatedParallel) {

2578 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "

2579 << "checks.\n");

2580 return true;

2581 }

2582

2583 for (LoadInst *LD : Loads) {

2584 Value *Ptr = LD->getPointerOperand();

2585

2586

2587

2588

2589

2590

2591

2592

2593 bool IsReadOnlyPtr = false;

2595 if (Seen.insert({Ptr, AccessTy}).second ||

2596 getPtrStride(*PSE, AccessTy, Ptr, TheLoop, SymbolicStrides)) {

2597 ++NumReads;

2598 IsReadOnlyPtr = true;

2599 }

2600

2601

2602

2603 if (UniformStores.count(Ptr)) {

2604 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "

2605 "load and uniform store to the same address!\n");

2606 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;

2607 }

2608

2610

2611

2612

2613 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))

2615

2617 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {

2618 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);

2619 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);

2620 });

2621 }

2622

2623

2624

2625 if (NumReadWrites == 1 && NumReads == 0) {

2626 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");

2627 return true;

2628 }

2629

2630

2631

2632 Accesses.buildDependenceSets();

2633

2634

2635

2636 Value *UncomputablePtr = nullptr;

2637 bool CanDoRTIfNeeded =

2638 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,

2639 SymbolicStrides, UncomputablePtr, false);

2640 if (!CanDoRTIfNeeded) {

const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);

2642 recordAnalysis("CantIdentifyArrayBounds", I)

2643 << "cannot identify array bounds";

2644 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "

2645 << "the array bounds.\n");

2646 return false;

2647 }

2648

2650 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");

2651

2652 bool DepsAreSafe = true;

2653 if (Accesses.isDependencyCheckNeeded()) {

2654 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");

2655 DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,

2656 Accesses.getDependenciesToCheck());

2657

2659 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

2660

2661

2662 Accesses.resetDepChecks(*DepChecker);

2663

2664 PtrRtChecking->reset();

2665 PtrRtChecking->Need = true;

2666

2667 auto *SE = PSE->getSE();

2668 UncomputablePtr = nullptr;

2669 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(

2670 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);

2671

2672

2673 if (!CanDoRTIfNeeded) {

auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);

2675 recordAnalysis("CantCheckMemDepsAtRunTime", I)

2676 << "cannot check memory dependencies at runtime";

2677 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");

2678 return false;

2679 }

2680 DepsAreSafe = true;

2681 }

2682 }

2683

2684 if (HasConvergentOp) {

2685 recordAnalysis("CantInsertRuntimeCheckWithConvergent")

2686 << "cannot add control dependency to convergent operation";

2687 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "

2688 "would be needed with a convergent operation\n");

2689 return false;

2690 }

2691

2692 if (DepsAreSafe) {

2694 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"

2695 << (PtrRtChecking->Need ? "" : " don't")

2696 << " need runtime memory checks.\n");

2697 return true;

2698 }

2699

2700 emitUnsafeDependenceRemark();

2701 return false;

2702}

2703

2704void LoopAccessInfo::emitUnsafeDependenceRemark() {

2705 const auto *Deps = getDepChecker().getDependences();

2706 if (!Deps)

2707 return;

2708 const auto *Found =

2712 });

2713 if (Found == Deps->end())

2714 return;

2716

2717 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");

2718

2719

2720 bool HasForcedDistribution = false;

2721 std::optional<const MDOperand *> Value =

assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();

2727 }

2728

2729 const std::string Info =

2730 HasForcedDistribution

2731 ? "unsafe dependent memory operations in loop."

2732 : "unsafe dependent memory operations in loop. Use "

2733 "#pragma clang loop distribute(enable) to allow loop distribution "

2734 "to attempt to isolate the offending operations into a separate "

2735 "loop";

2737 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;

2738

2739 switch (Dep.Type) {

2745 R << "\nBackward loop carried data dependence.";

2746 break;

2748 R << "\nForward loop carried data dependence that prevents "

2749 "store-to-load forwarding.";

2750 break;

2752 R << "\nBackward loop carried data dependence that prevents "

2753 "store-to-load forwarding.";

2754 break;

2756 R << "\nUnsafe indirect dependence.";

2757 break;

2759 R << "\nUnknown data dependence.";

2760 break;

2761 }

2762

2764 DebugLoc SourceLoc = I->getDebugLoc();

2766 SourceLoc = DD->getDebugLoc();

2767 if (SourceLoc)

2768 R << " Memory location is the same as accessed at "

2769 << ore::NV("Location", SourceLoc);

2770 }

2771}

2772

2775 assert(TheLoop->contains(BB) && "Unknown block used");

2776

2777

2779 return !DT->dominates(BB, Latch);

2780}

2781

2784 assert(!Report && "Multiple reports generated");

2785

2788

2789 if (I) {

2790 CodeRegion = I->getParent();

2791

2792

2793 if (I->getDebugLoc())

2794 DL = I->getDebugLoc();

2795 }

2796

Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                      DL, CodeRegion);

2799 return *Report;

2800}

2801

2803 auto *SE = PSE->getSE();

2804

2805

2807 return false;

2810}

2811

2812

2813

2814

2819

2820

2822

2824 std::advance(GEPTI, LastOperand - 2);

2825

2826

2827

2831 if (ElemSize != GEPAllocSize)

2832 break;

2833 --LastOperand;

2834 }

2835

2836 return LastOperand;

2837}

2838

2839

2840

2841

auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
if (!GEP)
  return Ptr;

2846

2848

2849

2850

2851 for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)

2852 if (I != InductionOperand &&

2854 return Ptr;

2855 return GEP->getOperand(InductionOperand);

2856}

2857

2858

2859

auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());

2862 if (!PtrTy || PtrTy->isAggregateType())

2863 return nullptr;

2864

2865

2866

2867

2869

2870

2871 int64_t PtrAccessSize = 1;

2872

2875

2876 if (Ptr != OrigPtr)

2877

2879 V = C->getOperand();

2880

const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);

2882 if (!S)

2883 return nullptr;

2884

2885

2886

2888 return nullptr;

2889

2891 if (!V)

2892 return nullptr;

2893

2894

2895

2896 if (OrigPtr == Ptr) {

if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {

2898 if (M->getOperand(0)->getSCEVType() != scConstant)

2899 return nullptr;

2900

const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

2902

2903

2905 return nullptr;

2906

2908 if (PtrAccessSize != StepVal)

2909 return nullptr;

2910 V = M->getOperand(1);

2911 }

2912 }

2913

2914

2915

2917 return nullptr;

2918

2919

if (isa<SCEVUnknown>(V))

2921 return V;

2922

if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
  if (isa<SCEVUnknown>(C->getOperand()))

2925 return V;

2926

2927 return nullptr;

2928}

2929

2930void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {

Value *Ptr = getLoadStorePointerOperand(MemAccess);
if (!Ptr)

2933 return;

2934

2935

2936

2937

2938

2939

2940

2942 if (!StrideExpr)

2943 return;

2944

2945 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "

2946 "versioning:");

2947 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");

2948

2950 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");

2951 return;

2952 }

2953

2954

2955

2956

2957

2958

2959

2960

2961

2962

2963

2964

2965

2966

2968

2969

2970

2971

2973 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());

2974 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());

2975 const SCEV *CastedStride = StrideExpr;

2976 const SCEV *CastedBECount = MaxBTC;

2978 if (BETypeSizeBits >= StrideTypeSizeBits)

2980 else

2982 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);

2983

2984

2985

2988 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "

2989 "Stride==1 predicate will imply that the loop executes "

2990 "at most once.\n");

2991 return;

2992 }

2993 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

2994

2995

2996

2997 const SCEV *StrideBase = StrideExpr;

2998 if (const auto *C = dyn_cast(StrideBase))

2999 StrideBase = C->getOperand();

3000 SymbolicStrides[Ptr] = cast(StrideBase);

3001}

3002

3008 PtrRtChecking(nullptr), TheLoop(L) {

unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();

3010 if (TTI) {

3014

3015

3016 MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;

3017 }

3018

MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();

3023 }

DepChecker = std::make_unique<MemoryDepChecker>(*PSE, L, SymbolicStrides,
                                                MaxTargetVectorWidthInBits);

PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);

3027 if (canAnalyzeLoop())

3028 CanVecMem = analyzeLoop(AA, LI, TLI, DT);

3029}

3030

3032 if (CanVecMem) {

3033 OS.indent(Depth) << "Memory dependences are safe";

3036 OS << " with a maximum safe vector width of "

3038 if (PtrRtChecking->Need)

3039 OS << " with run-time checks";

3040 OS << "\n";

3041 }

3042

3043 if (HasConvergentOp)

3044 OS.indent(Depth) << "Has convergent operation in loop\n";

3045

3046 if (Report)

3047 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

3048

3049 if (auto *Dependences = DepChecker->getDependences()) {

3051 for (const auto &Dep : *Dependences) {

3053 OS << "\n";

3054 }

3055 } else

3056 OS.indent(Depth) << "Too many dependences, not recorded\n";

3057

3058

3059 PtrRtChecking->print(OS, Depth);

3060 OS << "\n";

3061

3063 << "Non vectorizable stores to invariant address were "

3064 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||

3065 HasLoadStoreDependenceInvolvingLoopInvariantAddress

3066 ? ""

3067 : "not ")

3068 << "found in loop.\n";

3069

3072

3073 OS << "\n";

3074

3075 OS.indent(Depth) << "Expressions re-written:\n";

3077}

3078

3080 const auto &[It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});

3081

3082 if (Inserted)

3083 It->second =

std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);

3085

3086 return *It->second;

3087}
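// Usage sketch (assumed caller code, not part of this file): a transform pass
// typically queries the cached analysis like
//   LoopAccessInfoManager &LAIs = AM.getResult<LoopAccessAnalysis>(F);
//   const LoopAccessInfo &LAI = LAIs.getInfo(*L);
//   if (LAI.getRuntimePointerChecking()->Need) { /* emit runtime checks */ }
// getInfo lazily constructs and caches one LoopAccessInfo per loop, as above.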

3090

3091

3092

3093

3094 for (const auto &[L, LAI] : LoopAccessInfoMap) {

3095 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&

3096 LAI->getPSE().getPredicate().isAlwaysTrue())

3097 continue;

3099 }

3100

3102 LoopAccessInfoMap.erase(L);

3103}

3104

3108

3111

3112 return true;

3113

3114

3115

3116

3121}

3122

3132}

3133

This file implements a class to represent arbitrary precision integral constant values and operations...

ReachingDefAnalysis InstSet & ToRemove

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")

Analysis containing CSE Info

This file contains the declarations for the subclasses of Constant, which represent the different fla...

Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx

This file defines the DenseMap class.

Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...

This header defines various interfaces for pass management in LLVM.

static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))

We collect dependences up to this threshold.

static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))

Enable store-to-load forwarding conflict detection.

static void findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr, SmallVectorImpl< PointerIntPair< const SCEV *, 1, bool > > &ScevList, unsigned Depth)

static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr, const SCEV *PtrScev, Loop *L, bool Assume)

Check whether a pointer can participate in a runtime bounds check.

static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))

The maximum iterations used to merge memory checks.

static const SCEV * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)

Get the stride of a pointer access in a loop.

static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep)

Find the operand of the GEP that should be checked for consecutive stores.

static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))

static bool isNoWrap(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &Strides, Value *Ptr, Type *AccessTy, Loop *L, bool Assume)

Check whether a pointer address cannot wrap.

static cl::opt< bool, true > HoistRuntimeChecks("hoist-runtime-checks", cl::Hidden, cl::desc("Hoist inner loop runtime memory checks to outer loop if possible"), cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true))

static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))

static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &MaxBTC, const SCEV &Dist, uint64_t MaxStride, uint64_t TypeByteSize)

Given a dependence-distance Dist between two memory accesses, that have strides in the same direction...

static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))

static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)

static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)

Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e. monotonically increasing/decreasing.

static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)

If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.

static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)

Check the dependence for two accesses with the same stride Stride.

static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)

Compare I and J and return the minimum.

static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))

static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))

static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)

static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))

This enables versioning on the strides of symbolically striding memory accesses in code like the following.
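
The example loop the comment alludes to is worth spelling out; a sketch with illustrative names (A, B, Stride1, Stride2):

for (int i = 0; i < N; ++i)
  A[i * Stride1] += B[i * Stride2];

// Versioning guards a vectorizable copy with a runtime stride test, roughly:
if (Stride1 == 1 && Stride2 == 1) {
  // vectorized loop body, strides known to be one
} else {
  // original scalar loop
}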

This header provides classes for managing per-loop analyses.

This file provides utility analysis objects describing memory locations.

This file defines the PointerIntPair class.

This file implements a set that has insertion order iteration characteristics.

This file defines the SmallPtrSet class.

This file defines the SmallSet class.

This file defines the SmallVector class.

This pass exposes codegen information to IR-level passes.

A manager for alias analyses.

Class for arbitrary precision integers.

uint64_t getZExtValue() const

Get zero extended value.

APInt abs() const

Get the absolute value.

unsigned getBitWidth() const

Return the number of bits in the APInt.

APInt sextOrTrunc(unsigned width) const

Sign extend or truncate to width.

int64_t getSExtValue() const

Get sign extended value.
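
A minimal sketch of these APInt operations in the style of LAA's dependence-distance arithmetic; the constant -16 is illustrative.

#include "llvm/ADT/APInt.h"
using namespace llvm;

void apintSketch() {
  APInt Dist(/*numBits=*/64, /*val=*/-16, /*isSigned=*/true);
  int64_t Signed = Dist.getSExtValue();     // -16
  uint64_t Mag = Dist.abs().getZExtValue(); // 16
  APInt Narrow = Dist.sextOrTrunc(32);      // re-width, preserving sign
  unsigned Bits = Narrow.getBitWidth();     // 32
  (void)Signed; (void)Mag; (void)Bits;
}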

This templated class represents "all analyses that operate over &lt;a particular IR unit&gt;" (e.g. a function or a module).

API to communicate dependencies between analyses during invalidation.

bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)

Trigger the invalidation of some other analysis pass if not already handled and return whether it was in fact invalidated.

A container for analyses that lazily runs them and caches their results.

PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)

Get the result of an analysis pass for a given IR unit.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

LLVM Basic Block Representation.

const Function * getParent() const

Return the enclosing method, or null if none.

const DataLayout & getDataLayout() const

Get the data layout of the module this basic block belongs to.

This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR changes in between queries.

Base class for all callable instructions (InvokeInst and CallInst); holds everything related to calling a function.

@ ICMP_ULE

unsigned less or equal

This class represents an Operation in the Expression.

A parsed version of the target data layout string, and methods for querying it.

iterator find(const_arg_type_t< KeyT > Val)

Analysis pass which computes a DominatorTree.

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

bool dominates(const BasicBlock *BB, const Use &U) const

Return true if the (end of the) basic block BB dominates the use U.

EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient operations: insert an element into a class of its own, union two classes, and find the class for a given element.

iterator findValue(const ElemTy &V) const

findValue - Return an iterator to the specified value.

iterator insert(const ElemTy &Data)

insert - Insert a new value into the union/find set, ignoring the request if the value already exists.

member_iterator member_end() const

typename std::set< ECValue, ECValueComparator >::const_iterator iterator

iterator* - Provides a way to iterate over all values in the set.

member_iterator member_begin(iterator I) const

member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)

union - Merge the two equivalence sets for the specified values, inserting them if they do not already exist in the equivalence set.

const ElemTy & getLeaderValue(const ElemTy &V) const

getLeaderValue - Return the leader for the specified value that is in the set.
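
A minimal sketch of this union-find interface; plain ints stand in for the MemAccessInfo values LAA stores in its DepCandidates.

#include "llvm/ADT/EquivalenceClasses.h"
using namespace llvm;

void ecSketch() {
  EquivalenceClasses<int> EC;
  EC.insert(1);
  EC.insert(2);
  EC.unionSets(1, 2);                // 1 and 2 now share one class
  int Leader = EC.getLeaderValue(2); // same leader for both members
  for (auto MI = EC.member_begin(EC.findValue(1)); MI != EC.member_end(); ++MI)
    ; // visits every member of 1's class
  (void)Leader;
}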

bool hasOptSize() const

Optimize this function for size (-Os) or minimum size (-Oz).

an instruction for type-safe pointer arithmetic to access elements of arrays and structs

Type * getResultElementType() const

PointerType * getType() const

Global values are always pointers.

const DataLayout & getDataLayout() const

Get the data layout of the module this instruction belongs to.

Class to represent integer types.

static IntegerType * get(LLVMContext &C, unsigned NumBits)

This static method is the primary way of constructing an IntegerType.

An instruction for reading from memory.

Value * getPointerOperand()

static constexpr LocationSize beforeOrAfterPointer()

Any location before or after the base pointer (but still within the underlying object).

This analysis provides dependence information for the memory accesses of a loop.

Result run(Function &F, FunctionAnalysisManager &AM)

bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)

const LoopAccessInfo & getInfo(Loop &L)

Drive the analysis of memory accesses in the loop.

const MemoryDepChecker & getDepChecker() const

The Memory Dependence Checker, which can determine the loop-independent and loop-carried dependences between memory accesses.

bool isInvariant(Value *V) const

Returns true if value V is loop invariant.

void print(raw_ostream &OS, unsigned Depth=0) const

Print the information about the memory accesses in the loop.

static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)

Return true if the block BB needs to be predicated in order for the loop to be vectorized.

LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
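
A hedged sketch of driving the analysis from a new-pass-manager function pass, built from the declarations above; MyPass is a hypothetical pass name.

PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &LAIs = AM.getResult<LoopAccessAnalysis>(F); // per-loop LAI cache
  auto &LI = AM.getResult<LoopAnalysis>(F);
  for (Loop *L : LI) {                // top-level loops only, for brevity
    const LoopAccessInfo &LAI = LAIs.getInfo(*L); // drives the analysis
    LAI.print(dbgs());                // dependences and runtime checks
  }
  return PreservedAnalyses::all();
}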

Analysis pass that exposes the LoopInfo for a function.

bool contains(const LoopT *L) const

Return true if the specified loop is contained within this loop.

BlockT * getLoopLatch() const

If there is a single latch block for this loop, return it.

bool isInnermost() const

Return true if the loop does not contain any (natural) loops.

unsigned getNumBackEdges() const

Calculate the number of back edges to the loop header.

BlockT * getHeader() const

LoopT * getParentLoop() const

Return the parent loop if it exists or nullptr for top level loops.

Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse post-order traversal of a loop.

Represents a single loop in the control flow graph.

std::string getLocStr() const

Return a string containing the debug location of the loop (file name + line number if present,...

bool isAnnotatedParallel() const

Returns true if the loop is annotated parallel.

DebugLoc getStartLoc() const

Return the debug location of the start of this loop.

ArrayRef< MDOperand > operands() const

Tracking metadata reference owned by Metadata.

This class implements a map that also provides access to all stored values in a deterministic order.

Checks memory dependences among accesses to the same underlying object to determine whether vectorization is legal or not (and at which vectorization factor).

ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const

Return the program order indices for the access location (Ptr, IsWrite).

bool isSafeForAnyVectorWidth() const

Return true if the number of elements that are safe to operate on simultaneously is not bounded.

bool areDepsSafe(const DepCandidates &AccessSets, const MemAccessInfoList &CheckDeps)

Check whether the dependencies between the accesses are safe.

const SmallVectorImpl< Instruction * > & getMemoryInstructions() const

The vector of memory access instructions.

const Loop * getInnermostLoop() const

uint64_t getMaxSafeVectorWidthInBits() const

Return the number of elements that are safe to operate on simultaneously, multiplied by the size of the element in bits.

bool isSafeForVectorization() const

No memory dependence was encountered that would inhibit vectorization.

const SmallVectorImpl< Dependence > * getDependences() const

Returns the memory dependences.
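
A sketch of the safety queries, reusing LAI from the pass sketch above:

const MemoryDepChecker &DepChecker = LAI.getDepChecker();
if (DepChecker.isSafeForVectorization()) {
  // Bounded width unless every width is safe:
  uint64_t MaxBits = DepChecker.isSafeForAnyVectorWidth()
                         ? UINT64_MAX
                         : DepChecker.getMaxSafeVectorWidthInBits();
  (void)MaxBits;
}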

DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > & getPointerBounds()

SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const

Find the set of instructions that read or write via Ptr.

VectorizationSafetyStatus

Type to keep track of the status of the dependence check.

@ PossiblySafeWithRtChecks

bool shouldRetryWithRuntimeCheck() const

In some cases when the dependency check fails we can still vectorize the loop with a dynamic array access check.

void addAccess(StoreInst *SI)

Register the location (instructions are given increasing numbers) of a write access.

PointerIntPair< Value *, 1, bool > MemAccessInfo

Representation for a specific memory location.

static MemoryLocation get(const LoadInst *LI)

Return a location with information about the memory reference by the given instruction.

LocationSize Size

The maximum size of the location, in address-units, or UnknownSize if the size is not known.

AAMDNodes AATags

The metadata nodes which describe the aliasing of the location (each member is null if that kind of metadata is unavailable).

const Value * Ptr

The address of the start of the location.
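
A sketch of packaging an access for alias queries; LI here is a LoadInst* assumed in scope (not the LoopInfo above).

MemoryLocation Loc = MemoryLocation::get(LI);
const Value *Addr = Loc.Ptr; // start address of the access
LocationSize Sz = Loc.Size;  // may be precise or beforeOrAfterPointer()
(void)Addr; (void)Sz;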

An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of existing predicates.

void addPredicate(const SCEVPredicate &Pred)

Adds a new predicate.

ScalarEvolution * getSE() const

Returns the ScalarEvolution analysis used.

const SCEVPredicate & getPredicate() const

bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)

Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.

void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)

Proves that V doesn't overflow by adding SCEV predicate.

void print(raw_ostream &OS, unsigned Depth) const

Print the SCEV mappings done by the Predicated Scalar Evolution.

const SCEVAddRecExpr * getAsAddRec(Value *V)

Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.

const SCEV * getSymbolicMaxBackedgeTakenCount()

Get the (predicated) symbolic max backedge count for the analyzed loop.

const SCEV * getSCEV(Value *V)

Returns the SCEV expression of V, in the context of the current SCEV predicate.
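
A sketch of how these entry points combine; PSE (a PredicatedScalarEvolution) and Ptr (a pointer Value*) are assumed in scope.

if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) { // may add predicates
  if (AR->isAffine()) {
    const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
    // Record, as a runtime-checked predicate, that the increment cannot wrap:
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    (void)Step;
  }
}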

A set of analyses that are preserved following a run of a transformation pass.

PreservedAnalysisChecker getChecker() const

Build a checker for this PreservedAnalyses and the specified analysis type.

Holds information about the memory runtime legality checks to verify that a group of pointers do not overlap.

bool Need

This flag indicates if we need to add the runtime check.

void reset()

Reset the state of the pointer runtime information.

unsigned getNumberOfChecks() const

Returns the number of run-time checks required according to needsChecking.

void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const

Print Checks.

bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const

Decide if we need to add a check between two groups of pointers, according to needsChecking.

void print(raw_ostream &OS, unsigned Depth=0) const

Print the list of run-time memory checks necessary.

SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups

Holds a partitioning of pointers into "check groups".

void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)

Generate the checks and store it.

static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)

Check if pointers are in the same partition.

SmallVector< PointerInfo, 2 > Pointers

Information about the pointers that may require checking.

void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)

Insert a pointer and calculate the start and end SCEVs.
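
Downstream consumption is a sketch too; RtChecking is assumed to be LAI.getRuntimePointerChecking() from the pass sketch earlier.

if (RtChecking->Need) { // runtime disambiguation required
  unsigned NumChecks = RtChecking->getNumberOfChecks();
  RtChecking->print(dbgs()); // pointer groups and required comparisons
  (void)NumChecks;
}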

This node represents a polynomial recurrence on the trip count of the specified loop.

const SCEV * getStepRecurrence(ScalarEvolution &SE) const

Constructs and returns the recurrence indicating how much this expression steps by.

bool isAffine() const

Return true if this represents an expression A + B*x where A and B are loop invariant values.

const Loop * getLoop() const

This class represents a constant integer value.

const APInt & getAPInt() const

This is the base class for unary integral cast operator classes.

This node represents multiplication of some number of SCEVs.

NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const

virtual void print(raw_ostream &OS, unsigned Depth=0) const =0

Prints a textual representation of this predicate with an indentation of Depth.

This class represents an analyzed expression in the program.

Type * getType() const

Return the LLVM type of this SCEV expression.

Analysis pass that exposes the ScalarEvolution for a function.

static LoopGuards collect(const Loop *L, ScalarEvolution &SE)

Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be preserved during rewriting.

The main scalar evolution driver.

bool isKnownNonNegative(const SCEV *S)

Test if the given expression is known to be non-negative.

const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)

Return the SCEV object corresponding to -V.

bool isKnownNonPositive(const SCEV *S)

Test if the given expression is known to be non-positive.

const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)

const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)

const SCEV * getConstant(ConstantInt *V)

const SCEV * getSCEV(Value *V)

Return a SCEV expression for the full generality of the specified expression.

const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)

Return a SCEV corresponding to a conversion of the input value to the specified type.

const SCEV * getOne(Type *Ty)

Return a SCEV for the constant 1 of a specific type.

const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)

bool isLoopInvariant(const SCEV *S, const Loop *L)

Return true if the value of the given SCEV is unchanging in the specified loop.

bool isKnownPositive(const SCEV *S)

Test if the given expression is known to be positive.

const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)

bool isSCEVable(Type *Ty) const

Test if values of the given type are analyzable within the SCEV framework.

Type * getEffectiveSCEVType(Type *Ty) const

Return a type with the same bitwidth as the given type and which represents how SCEV will treat the given type, for which isSCEVable must return true.

const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)

APInt getSignedRangeMin(const SCEV *S)

Determine the min of the signed range for a particular SCEV.

const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)

Return an expression for the store size of StoreTy that is type IntTy.

const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)

Return LHS-RHS.

const SCEV * getCouldNotCompute()

const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)

Try to apply information from loop guards for L to Expr.

const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)

Get a canonical multiply expression, or something simpler if possible.

const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)

Return an expression for a TypeSize.

std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)

Compute LHS - RHS and return the result as an APInt if it is a constant, and std::nullopt if it isn't.

const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)

Get a canonical add expression, or something simpler if possible.

const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)

Return a SCEV corresponding to a conversion of the input value to the specified type.

bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)

Test if the given expression is known to satisfy the condition described by Pred, LHS, and RHS.
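
A sketch of a dependence-distance query assembled only from the entry points above; SE (a ScalarEvolution&), Src, and Sink (SCEVs for two accesses) are assumed in scope.

const SCEV *Dist = SE.getMinusSCEV(Sink, Src); // Sink - Src
bool NonNegative = SE.isKnownNonNegative(Dist);
std::optional<APInt> Diff = SE.computeConstantDifference(Sink, Src);
int64_t ConstBytes = Diff ? Diff->getSExtValue() : 0; // constant distance, if any
(void)NonNegative; (void)ConstBytes;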

A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).

size_type count(const T &V) const

count - Return 1 if the element is in the set, 0 otherwise.

std::pair< const_iterator, bool > insert(const T &V)

insert - Insert an element into the set if it isn't already there.

This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.

reference emplace_back(ArgTypes &&... Args)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.

Analysis pass providing the TargetTransformInfo.

Analysis pass providing the TargetLibraryInfo.

Provides information about what library functions are available for the current target.

This pass provides access to the codegen interfaces that are needed for IR-level transformations.

TypeSize getRegisterBitWidth(RegisterKind K) const

The instances of the Type class are immutable: once they are created, they are never changed.

bool isVectorTy() const

True if this is an instance of VectorType.

bool isPointerTy() const

True if this is an instance of PointerType.

unsigned getPointerAddressSpace() const

Get the address space of this pointer or pointer vector type.

A Use represents the edge between a Value definition and its users.

Value * getOperand(unsigned i) const

unsigned getNumOperands() const

static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)

Retrieve all the VFInfo instances associated to the CallInst CI.

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const

Accumulate the constant offset this value has compared to a base pointer.
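
A sketch of the offset-stripping idiom used when diffing two pointers; Ptr (a Value*) and DL (a DataLayout&) are assumed in scope.

APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
const Value *Base = Ptr->stripAndAccumulateConstantOffsets(
    DL, Offset, /*AllowNonInbounds=*/true);
// Base plus Offset (in bytes) reconstitutes Ptr.
(void)Base;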

StringRef getName() const

Return a constant reference to the value's name.

constexpr ScalarTy getFixedValue() const

constexpr bool isNonZero() const

An efficient, type-erasing, non-owning reference to a callable.

TypeSize getSequentialElementStride(const DataLayout &DL) const

Type * getIndexedType() const

This class implements an extremely fast bulk output stream that can only output to a stream.

raw_ostream & indent(unsigned NumSpaces)

indent - Insert 'NumSpaces' spaces.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

bool match(Val *V, const Pattern &P)

is_zero m_Zero()

Match any null constant or a vector with all elements equal to 0.

initializer< Ty > init(const Ty &Val)

LocationClass< Ty > location(Ty &L)

DiagnosticInfoOptimizationBase::Argument NV

This is an optimization pass for GlobalISel generic memory operations.

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)

Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)

Returns intrinsic ID for call.

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...

unsigned getPointerAddressSpace(const Type *T)

std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)

Find string metadata for loop.

const Value * getLoadStorePointerOperand(const Value *V)

A helper function that returns the pointer operand of a load or store instruction.

const Value * getPointerOperand(const Value *V)

A helper function that returns the pointer operand of a load, store or GEP instruction.

OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)

Wrapper function around std::transform to apply a function to a range and store the result elsewhere.

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

bool NullPointerIsDefined(const Function *F, unsigned AS=0)

Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool isPointerTy(const Type *T)

std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)

If the pointer has a constant stride return it in units of the access type size.

bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)

Attempt to sort the pointers in VL and return the sorted indices in SortedIndices, if reordering is required.

@ First

Helpers to iterate all locations in the MemoryEffectsBase class.

const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)

Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one, assuming the SCEV predicate associated with PSE is true.

bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)

Returns true if the memory operations A and B are consecutive.
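
A sketch tying these helpers together; I (a load or store Instruction&), PSE, and L are assumed in scope.

Value *Ptr = getLoadStorePointerOperand(&I);
Type *AccessTy = getLoadStoreType(&I);
std::optional<int64_t> Stride = getPtrStride(PSE, AccessTy, Ptr, L);
// Stride of 1 or -1 means consecutive forward/backward access:
bool Unit = Stride && (*Stride == 1 || *Stride == -1);
(void)Unit;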

bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)

Return true if this function can prove that V does not have undef bits and is never poison.

OutputIt copy(R &&Range, OutputIt Out)

auto find_if(R &&Range, UnaryPredicate P)

Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.

gep_type_iterator gep_type_begin(const User *GEP)

void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)

This method is similar to getUnderlyingObject except that it can look through phi and select instructions and return multiple objects.

Type * getLoadStoreType(const Value *I)

A helper function that returns the type of a load or store instruction.

std::pair< const SCEV *, const SCEV * > getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *MaxBECount, ScalarEvolution *SE, DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > *PointerBounds)

Calculate Start and End points of memory access.

Implement std::hash so that hash_code can be used in STL containers.

IR Values for the lower and upper bounds of a pointer evolution.

MDNode * Scope

The tag for alias scope specification (used with noalias).

MDNode * TBAA

The tag for type-based alias analysis.

MDNode * NoAlias

The tag specifying the noalias scope.

A special type used by analysis passes to provide an address that identifies that particular analysis pass type.

Dependence between memory access instructions.

Instruction * getDestination(const MemoryDepChecker &DepChecker) const

Return the destination instruction of the dependence.

DepType Type

The type of the dependence.

bool isPossiblyBackward() const

May be a lexically backward dependence type (includes Unknown).

Instruction * getSource(const MemoryDepChecker &DepChecker) const

Return the source instruction of the dependence.

bool isForward() const

Lexically forward dependence.

bool isBackward() const

Lexically backward dependence.

void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const

Print the dependence.
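
Walking the recorded dependences, again reusing DepChecker from the sketch above; a null result means recording stopped after MaxDependences was exceeded.

if (const auto *Deps = DepChecker.getDependences()) {
  for (const MemoryDepChecker::Dependence &Dep : *Deps) {
    Instruction *Src = Dep.getSource(DepChecker);
    Instruction *Dst = Dep.getDestination(DepChecker);
    if (Dep.isBackward())
      dbgs() << "backward dep: " << *Src << " -> " << *Dst << "\n";
  }
}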

DepType

The type of the dependence.

@ BackwardVectorizableButPreventsForwarding

@ ForwardButPreventsForwarding

static const char * DepName[]

String version of the types.

static VectorizationSafetyStatus isSafeForVectorization(DepType Type)

Dependence types that don't prevent vectorization.

unsigned AddressSpace

Address space of the involved pointers.

bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck)

Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.

bool NeedsFreeze

Whether the pointer needs to be frozen after expansion, e.g. because it may be poison outside the loop.

RuntimeCheckingPtrGroup(unsigned Index, const RuntimePointerChecking &RtCheck)

Create a new pointer checking group containing a single pointer, with index Index in RtCheck.

const SCEV * High

The SCEV expression which represents the upper bound of all the pointers in this group.

SmallVector< unsigned, 2 > Members

Indices of all the pointers that constitute this grouping.

const SCEV * Low

The SCEV expression which represents the lower bound of all the pointers in this group.

bool IsWritePtr

Holds the information if this pointer is used for writing to memory.

unsigned DependencySetId

Holds the id of the set of pointers that could be dependent because of a shared underlying object.

unsigned AliasSetId

Holds the id of the disjoint alias set to which this pointer belongs.

static const unsigned MaxVectorWidth

Maximum SIMD width.

static unsigned VectorizationFactor

VF as overridden by the user.

static unsigned RuntimeMemoryCheckThreshold

When performing memory disambiguation checks at runtime, do not make more than this number of comparisons.

static bool isInterleaveForced()

True if force-vector-interleave was specified by the user.

static unsigned VectorizationInterleave

Interleave factor as overridden by the user.

static bool HoistRuntimeChecks

Function object to check whether the first component of a container supported by std::get (like std::pair and std::tuple) compares less than the first component of another container.