LLVM: lib/Analysis/LoopAccessAnalysis.cpp Source File

//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation for the loop memory dependence that was originally
// developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <variant>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
    VectorizationFactor("force-vector-width", cl::Hidden,
                        cl::desc("Sets the SIMD width. Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
    VectorizationInterleave("force-vector-interleave", cl::Hidden,
                            cl::desc("Sets the vectorization interleave count. "
                                     "Zero is autoselect."),
                            cl::location(
                                VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
bool VectorizerParams::HoistRuntimeChecks;

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
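// Example (illustrative): the thresholds above can be overridden when running
// the analysis printer, e.g.:
//   opt -passes='print<access-info>' -max-dependences=200 \
//       -memory-check-merge-threshold=50 -disable-output input.ll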

const SCEV *llvm::replaceSymbolicStrideSCEV(
    PredicatedScalarEvolution &PSE,
    const DenseMap<Value *, const SCEV *> &PtrToStride, Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  const SCEV *StrideSCEV = SI->second;
  // Note: This assert is both overly strong and overly weak.  The actual
  // invariant here is that StrideSCEV should be loop invariant.  The only
  // such invariant strides we happen to speculate right now are unknowns
  // and thus this is a reasonable proxy of the actual invariant.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");

  ScalarEvolution *SE = PSE.getSE();
  const SCEV *CT = SE->getOne(StrideSCEV->getType());
  PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
  const SCEV *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}
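// Example (illustrative): for a pointer with access SCEV
//   {%A,+,(4 * %Stride)}<%loop>
// and a map entry for %Stride, the added "%Stride == 1" predicate simplifies
// the returned expression to {%A,+,4}<%loop>.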

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, const RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}

/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on N-th loop
/// iteration. Then B is calculated as:
///   B = A + Step*N .
/// Where Step is the absolute stride of the memory accesses in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
    const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
    PredicatedScalarEvolution &PSE,
    DenseMap<std::pair<const SCEV *, Type *>,
             std::pair<const SCEV *, const SCEV *>> &PointerBounds) {
  ScalarEvolution *SE = PSE.getSE();

  auto [Iter, Ins] = PointerBounds.insert(
      {{PtrExpr, AccessTy},
       {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
  if (!Ins)
    return Iter->second;

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(PtrExpr, Lp)) {
    ScStart = ScEnd = PtrExpr;
  } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
    const SCEV *Ex = PSE.getSymbolicMaxBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  } else
    return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};

  assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
  assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");

  // Add the size of the pointed element to ScEnd.
  auto &DL = Lp->getHeader()->getDataLayout();
  Type *IdxTy = DL.getIndexType(PtrExpr->getType());
  const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Iter->second = {ScStart, ScEnd};
  return Iter->second;
}
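// Example (illustrative): for the add-rec {%a,+,4}<%loop> accessed as i32
// with a symbolic max backedge-taken count of N, the computed bounds are
//   Start = %a,  End = %a + 4*N + 4
// (the element size is added so that End is one past the last accessed byte).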

void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
      Lp, PtrExpr, AccessTy, PSE, DC.getPointerBounds());
  assert(!isa<SCEVCouldNotCompute>(ScStart) &&
         !isa<SCEVCouldNotCompute>(ScEnd) &&
         "must be able to compute both start and end expressions");
  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}

274

275bool RuntimePointerChecking::tryToCreateDiffCheck(

277

278

279

281 return false;

282

285

286

287

290 return false;

291

296

297

298 if (AccSrc.size() != 1 || AccSink.size() != 1)

299 return false;

300

301

302 if (AccSink[0] < AccSrc[0])

304

305 auto *SrcAR = dyn_cast(Src->Expr);

306 auto *SinkAR = dyn_cast(Sink->Expr);

307 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||

309 return false;

310

317 if (isa(SrcTy) || isa(DstTy))

318 return false;

319

321 SinkAR->getLoop()->getHeader()->getDataLayout();

322 unsigned AllocSize =

323 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));

324

325

326

327

328 auto *Step = dyn_cast(SinkAR->getStepRecurrence(*SE));

329 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||

330 Step->getAPInt().abs() != AllocSize)

331 return false;

332

336

337

338 if (Step->getValue()->isNegative())

340

341 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);

343 if (isa(SinkStartInt) ||

344 isa(SrcStartInt))

345 return false;

346

347 const Loop *InnerLoop = SrcAR->getLoop();

348

349

350

351

353 isa(SinkStartInt) && isa(SrcStartInt)) {

354 auto *SrcStartAR = cast(SrcStartInt);

355 auto *SinkStartAR = cast(SinkStartInt);

356 const Loop *StartARLoop = SrcStartAR->getLoop();

357 if (StartARLoop == SinkStartAR->getLoop() &&

359

360

361

362 SrcStartAR->getStepRecurrence(*SE) !=

363 SinkStartAR->getStepRecurrence(*SE)) {

364 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "

365 "cannot be hoisted out of the outer loop\n");

366 return false;

367 }

368 }

369

370 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"

371 << "SrcStart: " << *SrcStartInt << '\n'

372 << "SinkStartInt: " << *SinkStartInt << '\n');

373 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,

374 Src->NeedsFreeze || Sink->NeedsFreeze);

375 return true;

376}
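// The recorded PointerDiffInfo entries are later materialized by
// llvm::addDiffRuntimeChecks (Transforms/Utils/LoopUtils), which emits a
// single "SinkStart - SrcStart >= VF * IC * AllocSize" style comparison
// instead of a full interval-overlap test.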

SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ)) {
        CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
        Checks.emplace_back(&CGI, &CGJ);
      }
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (const auto &I : M.Members)
    for (const auto &J : N.Members)
      if (needsChecking(I, J))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
  if (!Diff)
    return nullptr;
  return Diff->isNegative() ? J : I;
}

bool RuntimeCheckingPtrGroup::addPointer(
    unsigned Index, const RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  this->NeedsFreeze |= NeedsFreeze;
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.
  //
  // Merging the groups is quadratic in the worst case, so the number of
  // comparisons is capped by MemoryCheckMergeThreshold below; once the cap
  // is reached, every remaining pointer is placed in its own group.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.emplace_back(I, *this);
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue].push_back(Index);

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We don't want to visit the same equivalence class twice.
    if (Seen.count(I))
      continue;

    MemAccessInfo Access(Pointers[I].PointerValue, Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      for (unsigned Pointer : PointerI->second) {
        bool Merged = false;
        // Mark this pointer as seen.
        Seen.insert(Pointer);

        // Go through all the existing sets and see if we can find one
        // which can include this pointer.
        for (RuntimeCheckingPtrGroup &Group : Groups) {
          // Don't perform more than a certain amount of comparisons.
          // This should limit the cost of grouping the pointers to something
          // reasonable.  If we do end up hitting this threshold, the algorithm
          // will create separate groups for all remaining pointers.
          if (TotalComparisons > MemoryCheckMergeThreshold)
            break;

          TotalComparisons++;

          if (Group.addPointer(Pointer, *this)) {
            Merged = true;
            break;
          }
        }

        if (!Merged)
          // We couldn't add this pointer to any existing set or the threshold
          // for the number of comparisons has been reached. Create a new group
          // to hold the current pointer.
          Groups.emplace_back(Pointer, *this);
      }
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}
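// Example (illustrative): for accesses a[i] and a[i+1] in the same dependency
// partition over 100 iterations of i32 elements, grouping merges both ranges
// into one RuntimeCheckingPtrGroup with Low == &a[0] and High == &a[101], so
// other groups are compared against one interval instead of two.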

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  return PointerI.AliasSetId == PointerJ.AliasSetId;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &[Check1, Check2] : Checks) {
    const auto &First = Check1->Members, &Second = Check2->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
    for (unsigned K : First)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
    for (unsigned K : Second)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (const auto &CG : CheckingGroups) {
    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned Member : CG.Members) {
      OS.indent(Depth + 6) << *Pointers[Member].Expr << "\n";
    }
  }
}

641

642namespace {

643

644

645

646

647

648class AccessAnalysis {

649public:

650

653

658 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),

659 LoopAliasScopes(LoopAliasScopes) {

660

661 BAA.enableCrossIterationMode();

662 }

663

664

665 void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {

667 AST.add(adjustLoc(Loc));

668 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);

669 if (IsReadOnly)

670 ReadOnlyPtr.insert(Ptr);

671 }

672

673

676 AST.add(adjustLoc(Loc));

677 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);

678 }

679

680

681

682

683

684

685

686

688 MemAccessInfo Access, Type *AccessTy,

691 Loop *TheLoop, unsigned &RunningDepId,

692 unsigned ASId, bool ShouldCheckStride, bool Assume);

693

694

695

696

697

698

701 Value *&UncomputablePtr, bool ShouldCheckWrap = false);

702

703

704

705 void buildDependenceSets() {

706 processMemAccesses();

707 }

708

709

710

711

712

713

714 bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }

715

716

718 CheckDeps.clear();

720 }

721

722 const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }

723

724private:

726

727

728

730

731

735 return Loc;

736 }

737

738

739 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {

740 if (!ScopeList)

741 return nullptr;

742

743

744

746 return LoopAliasScopes.contains(cast(Scope));

747 }))

748 return nullptr;

749

750 return ScopeList;

751 }

752

753

754

755 void processMemAccesses();

756

757

758

759 PtrAccessMap Accesses;

760

761

762 const Loop *TheLoop;

763

764

765 MemAccessInfoList CheckDeps;

766

767

769

770

772

773

774

776

777

779

780

781

782

784

785

786

787

788

789

790

791

792 bool IsRTCheckAnalysisNeeded = false;

793

794

796

798

799

800

802};

803

804}

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
                                const SCEV *PtrScev, Loop *L, bool Assume) {
  // The bounds for loop-invariant pointer is trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr,
                     Type *AccessTy, Loop *L, bool Assume) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  return getPtrStride(PSE, AccessTy, Ptr, L, Strides, Assume).has_value() ||
         PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
}

static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      for (const Use &Inc : PN->incoming_values())
        WorkList.push_back(Inc);
    } else
      AddPointer(Ptr);
  }
}

860

861

862

863

864

865

866

867

868

869

870

871

872

873

874

875

876

877

881 unsigned Depth) {

882

883

884

885

  if (isa<SCEVCouldNotCompute>(Scev) || L->isLoopInvariant(Ptr) ||
      !isa<Instruction>(Ptr) || Depth == 0) {

890 return;

891 }

892

894

896 return get<1>(S);

897 };

898

899 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {

900 switch (Opcode) {

901 case Instruction::Add:

903 case Instruction::Sub:

905 default:

906 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");

907 }

908 };

909

911 unsigned Opcode = I->getOpcode();

912 switch (Opcode) {

913 case Instruction::GetElementPtr: {

    auto *GEP = cast<GetElementPtrInst>(I);

915 Type *SourceTy = GEP->getSourceElementType();

916

917

918 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {

920 break;

921 }

926

927

928 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||

929 any_of(OffsetScevs, UndefPoisonCheck);

930

931

932

933

934 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)

935 BaseScevs.push_back(BaseScevs[0]);

936 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)

937 OffsetScevs.push_back(OffsetScevs[0]);

938 else {

939 ScevList.emplace_back(Scev, NeedsFreeze);

940 break;

941 }

942

943

946

947

948

949

951

952

957 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),

958 NeedsFreeze);

959 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),

960 NeedsFreeze);

961 break;

962 }

963 case Instruction::Select: {

965

966

967

970 if (ChildScevs.size() == 2) {

971 ScevList.push_back(ChildScevs[0]);

972 ScevList.push_back(ChildScevs[1]);

973 } else

975 break;

976 }

977 case Instruction::PHI: {

979

980

981

982 if (I->getNumOperands() == 2) {

985 }

986 if (ChildScevs.size() == 2) {

987 ScevList.push_back(ChildScevs[0]);

988 ScevList.push_back(ChildScevs[1]);

989 } else

991 break;

992 }

993 case Instruction::Add:

994 case Instruction::Sub: {

999

1000

1001 bool NeedsFreeze =

1002 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);

1003

1004

1005

1006

1007 if (LScevs.size() == 2 && RScevs.size() == 1)

1009 else if (RScevs.size() == 2 && LScevs.size() == 1)

1011 else {

1012 ScevList.emplace_back(Scev, NeedsFreeze);

1013 break;

1014 }

1015

1016 ScevList.emplace_back(

1017 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),

1018 NeedsFreeze);

1019 ScevList.emplace_back(

1020 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),

1021 NeedsFreeze);

1022 break;

1023 }

1024 default:

1025

1026 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");

1028 break;

1029 }

1030}

static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
                  const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
                  const Loop *L) {
  ScalarEvolution *SE = PSE.getSE();
  assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
  findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);

  // For now, we will only accept a forked pointer with two possible SCEVs
  // that are either SCEVAddRecExprs or loop invariant.
  if (Scevs.size() == 2 &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
       SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
       SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
    LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
    LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
    LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
    return Scevs;
  }

  return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
}
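// Example (illustrative): a "forked" pointer as handled above:
//   %sel = select i1 %cmp, ptr %a, ptr %b
//   %gep = getelementptr i32, ptr %sel, i64 %iv
// findForkedSCEVs yields the two SCEVs {%a,+,4} and {%b,+,4}, and each one is
// then bounds-checked separately at runtime.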

1056

1058 MemAccessInfo Access, Type *AccessTy,

1061 Loop *TheLoop, unsigned &RunningDepId,

1062 unsigned ASId, bool ShouldCheckWrap,

1063 bool Assume) {

1065

1068

1069 for (const auto &P : TranslatedPtrs) {

1070 const SCEV *PtrExpr = get<0>(P);

1072 return false;

1073

1074

1075

1076 if (ShouldCheckWrap) {

1077

1078 if (TranslatedPtrs.size() > 1)

1079 return false;

1080

1081 if (isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop, Assume))

1082 return false;

1083 }

1084

1085

1086 if (TranslatedPtrs.size() == 1)

1088 false};

1089 }

1090

1091 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {

1092

1093 unsigned DepId;

1094

1095 if (isDependencyCheckNeeded()) {

1097 unsigned &LeaderId = DepSetId[Leader];

1098 if (!LeaderId)

1099 LeaderId = RunningDepId++;

1100 DepId = LeaderId;

1101 } else

1102

1103 DepId = RunningDepId++;

1104

1105 bool IsWrite = Access.getInt();

1106 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,

1107 NeedsFreeze);

1108 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');

1109 }

1110

1111 return true;

1112}

1113

1117 Value *&UncomputablePtr, bool ShouldCheckWrap) {

1118

1119

1120 bool CanDoRT = true;

1121

1122 bool MayNeedRTCheck = false;

1123 if (!IsRTCheckAnalysisNeeded) return true;

1124

1125 bool IsDepCheckNeeded = isDependencyCheckNeeded();

1126

1127

1128

1129 unsigned ASId = 0;

1130 for (const auto &AS : AST) {

1131 int NumReadPtrChecks = 0;

1132 int NumWritePtrChecks = 0;

1133 bool CanDoAliasSetRT = true;

1134 ++ASId;

1135 auto ASPointers = AS.getPointers();

1136

1137

1138

1139 unsigned RunningDepId = 1;

1141

1143

1144

1145

1147 for (const Value *ConstPtr : ASPointers) {

1149 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));

1150 if (IsWrite)

1151 ++NumWritePtrChecks;

1152 else

1153 ++NumReadPtrChecks;

1155 }

1156

1157

1158

1159 if (NumWritePtrChecks == 0 ||

1160 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {

1161 assert((ASPointers.size() <= 1 ||

1164 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),

1165 true);

1166 return DepCands.findValue(AccessWrite) == DepCands.end();

1167 })) &&

1168 "Can only skip updating CanDoRT below, if all entries in AS "

1169 "are reads or there is at most 1 entry");

1170 continue;

1171 }

1172

1173 for (auto &Access : AccessInfos) {

1174 for (const auto &AccessTy : Accesses[Access]) {

1175 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,

1176 DepSetId, TheLoop, RunningDepId, ASId,

1177 ShouldCheckWrap, false)) {

1178 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"

1179 << *Access.getPointer() << '\n');

1181 CanDoAliasSetRT = false;

1182 }

1183 }

1184 }

1185

1186

1187

1188

1189

1190

1191

1192

1193

1194

1195 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

1196

1197

1198

1199 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {

1200

1201

1202

1203 CanDoAliasSetRT = true;

1204 for (const auto &[Access, AccessTy] : Retries) {

1205 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,

1206 DepSetId, TheLoop, RunningDepId, ASId,

1207 ShouldCheckWrap, true)) {

1208 CanDoAliasSetRT = false;

1209 UncomputablePtr = Access.getPointer();

1210 break;

1211 }

1212 }

1213 }

1214

1215 CanDoRT &= CanDoAliasSetRT;

1216 MayNeedRTCheck |= NeedsAliasSetRTCheck;

1217 ++ASId;

1218 }

1219

1220

1221

1222

1223

1224

1225 unsigned NumPointers = RtCheck.Pointers.size();

1226 for (unsigned i = 0; i < NumPointers; ++i) {

1227 for (unsigned j = i + 1; j < NumPointers; ++j) {

1228

1229 if (RtCheck.Pointers[i].DependencySetId ==

1230 RtCheck.Pointers[j].DependencySetId)

1231 continue;

1232

1233 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)

1234 continue;

1235

1238

1241 if (ASi != ASj) {

1243 dbgs() << "LAA: Runtime check would require comparison between"

1244 " different address spaces\n");

1245 return false;

1246 }

1247 }

1248 }

1249

1250 if (MayNeedRTCheck && CanDoRT)

1252

1254 << " pointer comparisons.\n");

1255

1256

1257

1258

1260

1261 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;

1262 if (!CanDoRTIfNeeded)

1263 RtCheck.reset();

1264 return CanDoRTIfNeeded;

1265}

1266

1267void AccessAnalysis::processMemAccesses() {

1268

1269

1270

1271

1272 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");

1274 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");

1276 for (const auto &[A, _] : Accesses)

1277 dbgs() << "\t" << *A.getPointer() << " ("

1278 << (A.getInt() ? "write"

1279 : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"

1280 : "read"))

1281 << ")\n";

1282 });

1283

1284

1285

1286

1287

1288 for (const auto &AS : AST) {

1289

1290

1291

1292 auto ASPointers = AS.getPointers();

1293

1294 bool SetHasWrite = false;

1295

1296

1298 UnderlyingObjToAccessMap ObjToLastAccess;

1299

1300

1301 PtrAccessMap DeferredAccesses;

1302

1303

1304

1305 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {

1306 bool UseDeferred = SetIteration > 0;

1307 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;

1308

1309 for (const Value *ConstPtr : ASPointers) {

1311

1312

1313

1314 for (const auto &[AC, _] : S) {

1315 if (AC.getPointer() != Ptr)

1316 continue;

1317

1318 bool IsWrite = AC.getInt();

1319

1320

1321

1322 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;

1323 if (UseDeferred && !IsReadOnlyPtr)

1324 continue;

1325

1326

1327 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||

1328 S.count(MemAccessInfo(Ptr, false))) &&

1329 "Alias-set pointer not in the access set?");

1330

1331 MemAccessInfo Access(Ptr, IsWrite);

1333

1334

1335

1336

1337

1338

1339 if (!UseDeferred && IsReadOnlyPtr) {

1340

1341

1342 DeferredAccesses.insert({Access, {}});

1343 continue;

1344 }

1345

1346

1347

1348

1349

1350 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {

1351 CheckDeps.push_back(Access);

1352 IsRTCheckAnalysisNeeded = true;

1353 }

1354

1355 if (IsWrite)

1356 SetHasWrite = true;

1357

1358

1359

1361 ValueVector TempObjects;

1362

1363 UnderlyingObjects[Ptr] = {};

1367 << "Underlying objects for pointer " << *Ptr << "\n");

1368 for (const Value *UnderlyingObj : UOs) {

1369

1370

          if (isa<ConstantPointerNull>(UnderlyingObj) &&

1375 continue;

1376

1377 UnderlyingObjToAccessMap::iterator Prev =

1378 ObjToLastAccess.find(UnderlyingObj);

1379 if (Prev != ObjToLastAccess.end())

1381

1382 ObjToLastAccess[UnderlyingObj] = Access;

1383 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");

1384 }

1385 }

1386 }

1387 }

1388 }

1389}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because we could not
  // prove that the expression would not wrap around the address space.
  // The case below is handled specially: a GEP whose arithmetic is known not
  // to wrap (nusw) and whose single non-constant index is itself derived
  // from a non-wrapping NSW AddRec.
  const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->hasNoUnsignedSignedWrap())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : GEP->indices())
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed.  It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for other the operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      const SCEV *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// Check whether the access through \p Ptr has a constant stride.
std::optional<int64_t>
llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
                   const Loop *Lp,
                   const DenseMap<Value *, const SCEV *> &StridesMap,
                   bool Assume, bool ShouldCheckWrap) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
    return {0};

  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");
  if (isa<ScalableVectorType>(AccessTy)) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
                      << "\n");
    return std::nullopt;
  }

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return std::nullopt;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return std::nullopt;
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return std::nullopt;
  }

  const auto &DL = Lp->getHeader()->getDataLayout();
  TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
  int64_t Size = AllocSize.getFixedValue();
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return std::nullopt;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return std::nullopt;

  if (!ShouldCheckWrap)
    return Stride;

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
    return Stride;

  // An nusw getelementptr that is an AddRec cannot wrap. If it would wrap,
  // the distance between the previously accessed location and the wrapped
  // location will be larger than half the pointer index type space. In that
  // case, the GEP would be poison and any memory access dependent on it would
  // be immediate UB when executed.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
      GEP && GEP->hasNoUnsignedSignedWrap())
    return Stride;

  // If the null pointer is undefined, then a access sequence which would
  // otherwise access it can be assumed not to unsigned wrap.  Note that this
  // assumes the object in memory is aligned to the natural alignment.
  unsigned AddrSpace = Ty->getPointerAddressSpace();
  if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
      (Stride == 1 || Stride == -1))
    return Stride;

  if (Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
                      << "LAA: Pointer: " << *Ptr << "\n"
                      << "LAA: SCEV: " << *AR << "\n"
                      << "LAA: Added an overflow assumption\n");
    return Stride;
  }
  LLVM_DEBUG(
      dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
             << *Ptr << " SCEV: " << *AR << "\n");
  return std::nullopt;
}
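// Example (illustrative): for i32 accesses (AllocSize == 4), an add-rec step
// of +8 bytes yields a stride of 2, a step of -4 yields -1, and a step of +6
// is rejected because it is not a multiple of the element size.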

std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
                                         Type *ElemTyB, Value *PtrB,
                                         const DataLayout &DL,
                                         ScalarEvolution &SE, bool StrictCheck,
                                         bool CheckType) {
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return 0;

  // Make sure that the element types are the same if required.
  if (CheckType && ElemTyA != ElemTyB)
    return std::nullopt;

  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
  unsigned ASB = PtrB->getType()->getPointerAddressSpace();

  // Check that the address spaces match.
  if (ASA != ASB)
    return std::nullopt;
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  const Value *PtrA1 = PtrA->stripAndAccumulateConstantOffsets(
      DL, OffsetA, /*AllowNonInbounds=*/true);
  const Value *PtrB1 = PtrB->stripAndAccumulateConstantOffsets(
      DL, OffsetB, /*AllowNonInbounds=*/true);

  int Val;
  if (PtrA1 == PtrB1) {
    // Retrieve the address space again as pointer stripping now tracks through
    // `addrspacecast`.
    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
    // Check that the address spaces match and that the pointers are valid.
    if (ASA != ASB)
      return std::nullopt;

    IdxWidth = DL.getIndexSizeInBits(ASA);
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);

    OffsetB -= OffsetA;
    Val = OffsetB.getSExtValue();
  } else {
    // Otherwise compute the distance with SCEV between the base pointers.
    const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
    const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
    std::optional<APInt> Diff =
        SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
    if (!Diff)
      return std::nullopt;
    Val = Diff->getSExtValue();
  }
  int Size = DL.getTypeStoreSize(ElemTyA);
  int Dist = Val / Size;

  // Ensure that the calculated distance matches the type-based one after all
  // the bitcasts removal in the provided pointers.
  if (!StrictCheck || Dist * Size == Val)
    return Dist;
  return std::nullopt;
}
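// Example (illustrative): with i32 elements, PtrA == &a[1] and PtrB == &a[4]
// gives Val == 12 bytes and a returned distance of 3; with StrictCheck, a
// 13-byte offset would be rejected because 13 is not a multiple of 4.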

bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
                           const DataLayout &DL, ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  // Walk over the pointers, and map each of them to an offset relative to
  // first pointer in the array.
  Value *Ptr0 = VL[0];

  using DistOrdPair = std::pair<int64_t, int>;
  auto Compare = llvm::less_first();
  std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
  Offsets.emplace(0, 0);
  bool IsConsecutive = true;
  for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
    std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
                                              /*StrictCheck=*/true);
    if (!Diff)
      return false;

    // Check if the pointer with the same offset is found.
    int64_t Offset = *Diff;
    auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
    if (!IsInserted)
      return false;
    // Consecutive order if the inserted element is the last one.
    IsConsecutive &= std::next(It) == Offsets.end();
  }
  SortedIndices.clear();
  if (!IsConsecutive) {
    // Fill SortedIndices with the order in which the pointers appear when
    // sorted by offset.
    SortedIndices.resize(VL.size());
    for (auto [Idx, Off] : enumerate(Offsets))
      SortedIndices[Idx] = Off.second;
  }
  return true;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  if (!PtrA || !PtrB)
    return false;
  Type *ElemTyA = getLoadStoreType(A);
  Type *ElemTyB = getLoadStoreType(B);
  std::optional<int> Diff =
      getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
                      /*StrictCheck=*/true, CheckType);
  return Diff && *Diff == 1;
}

void MemoryDepChecker::addAccess(StoreInst *SI) {
  visitPointers(SI->getPointerOperand(), *InnermostLoop,
                [this, SI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
                  InstMap.push_back(SI);
                  ++AccessIdx;
                });
}

void MemoryDepChecker::addAccess(LoadInst *LI) {
  visitPointers(LI->getPointerOperand(), *InnermostLoop,
                [this, LI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
                  InstMap.push_back(LI);
                  ++AccessIdx;
                });
}

MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
  case IndirectUnsafe:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
  case IndirectUnsafe:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown || Type == IndirectUnsafe;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
  case IndirectUnsafe:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iteration between the store and the load are
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >> 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MinDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
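// Example (illustrative): Distance == 12 bytes with 4-byte elements: VF == 8
// bytes already misaligns the accesses (12 % 8 != 0) within the first few
// iterations, so the maximum safe VF drops below two elements and the
// function reports a store-to-load forwarding conflict.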

void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
  if (Status < S)
    Status = S;
}

/// Given a dependence-distance \p Dist between two
/// memory accesses, that have strides in the same direction whose absolute
/// value of the maximum stride is given in \p MaxStride, and that have the
/// same type size \p TypeByteSize, in a loop whose maximum backedge taken
/// count is \p MaxBTC, check if it is possible to prove statically that the
/// dependence distance is larger than the range that the accesses will travel
/// through the execution of the loop. If so, return true; false otherwise.
/// This is useful for example in loops such as the following (PR31098):
///     for (i = 0; i < D; ++i) {
///                = out[i];
///       out[i+D] =
///     }
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &MaxBTC, const SCEV &Dist,
                                     uint64_t MaxStride,
                                     uint64_t TypeByteSize) {

  // If we can prove that
  //      (**) |Dist| > MaxBTC * Step
  // where Step is the absolute stride of the memory accesses in bytes,
  // then there is no dependence.
  //
  // Rationale:
  // We basically want to check if the absolute distance (|Dist/Step|)
  // is >= the loop iteration count (or > MaxBTC).
  // This is equivalent to the Strong SIV test (Practical Dependence Testing,
  // Section 4.2.1); Note, that for vectorization it is sufficient to prove
  // that the dependence distance is >= VF; This is checked elsewhere.
  // But in some cases we can prune dependence distances early, and
  // even before selecting the VF, and without a runtime test, by comparing
  // the distance against the loop iteration count. Since the vectorized code
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
  // also guarantees that distance >= VF.
  //
  const uint64_t ByteStride = MaxStride * TypeByteSize;
  const SCEV *Step = SE.getConstant(MaxBTC.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
  uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // The multiplication of the absolute stride in bytes and the
  // backedgeTakenCount is non-negative, so we zero extend Product.
  if (DistTypeSizeBits > ProductTypeSizeBits)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is Dist - (MaxBTC * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= Dist)
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
  return SE.isKnownPositive(Minus);
}
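// Example (illustrative): for the loop in the comment above with i32
// elements, Dist == 4*D bytes, MaxStride == 1 and MaxBTC == D-1, so
// Dist - MaxBTC*4 == 4 is known positive and the accesses are proven
// independent without a runtime check.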

/// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not multiple of type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |
  return ScaledDist % Stride;
}

1887

1889 MemoryDepChecker::DepDistanceStrideAndSizeInfo>

1890MemoryDepChecker::getDependenceDistanceStrideAndSize(

1894 auto &SE = *PSE.getSE();

1895 const auto &[APtr, AIsWrite] = A;

1896 const auto &[BPtr, BIsWrite] = B;

1897

1898

1899 if (!AIsWrite && !BIsWrite)

1901

1904

1905

1906 if (APtr->getType()->getPointerAddressSpace() !=

1907 BPtr->getType()->getPointerAddressSpace())

1909

1910 std::optional<int64_t> StrideAPtr =

1911 getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);

1912 std::optional<int64_t> StrideBPtr =

1913 getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true, true);

1914

1917

1918

1919

1920

1921 if (StrideAPtr && *StrideAPtr < 0) {

1925 std::swap(StrideAPtr, StrideBPtr);

1926 }

1927

1929

1930 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink

1931 << "\n");

1932 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst

1933 << ": " << *Dist << "\n");

1934

1935

1936

1937

1938

1941 const auto &[SrcStart_, SrcEnd_] =

1943 const auto &[SinkStart_, SinkEnd_] =

  if (!isa<SCEVCouldNotCompute>(SrcStart_) &&
      !isa<SCEVCouldNotCompute>(SrcEnd_) &&
      !isa<SCEVCouldNotCompute>(SinkStart_) &&
      !isa<SCEVCouldNotCompute>(SinkEnd_)) {

1949 if (!LoopGuards)

1950 LoopGuards.emplace(

1953 auto SinkStart = SE.applyLoopGuards(SinkStart_, *LoopGuards);

1956

1957 auto SinkEnd = SE.applyLoopGuards(SinkEnd_, *LoopGuards);

1958 auto SrcStart = SE.applyLoopGuards(SrcStart_, *LoopGuards);

1961 }

1962 }

1963

1964

1965

1966

1967

1968

1969

1970

1971 if (!StrideAPtr || !StrideBPtr) {

1972 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");

1974 }

1975

1976 int64_t StrideAPtrInt = *StrideAPtr;

1977 int64_t StrideBPtrInt = *StrideBPtr;

1978 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt

1979 << " Sink induction step: " << StrideBPtrInt << "\n");

1980

1981

1982 if (!StrideAPtrInt || !StrideBPtrInt)

1984

1985

1986

1987 if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {

1989 dbgs() << "Pointer access with strides in different directions\n");

1991 }

1992

1993 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);

1994 bool HasSameSize =

1995 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);

1996 if (!HasSameSize)

1997 TypeByteSize = 0;

1998

1999 StrideAPtrInt = std::abs(StrideAPtrInt);

2000 StrideBPtrInt = std::abs(StrideBPtrInt);

2001

2002 uint64_t MaxStride = std::max(StrideAPtrInt, StrideBPtrInt);

2003

2004 std::optional<uint64_t> CommonStride;

2005 if (StrideAPtrInt == StrideBPtrInt)

2006 CommonStride = StrideAPtrInt;

2007

2008

2009

2010

2011 bool ShouldRetryWithRuntimeCheck = CommonStride.has_value();

2012

2013 return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,

2014 ShouldRetryWithRuntimeCheck, TypeByteSize,

2015 AIsWrite, BIsWrite);

2016}

2017

2019MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,

2021 assert(AIdx < BIdx && "Must pass arguments in program order");

2022

2023

2024

2025 auto Res =

2026 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);

  if (std::holds_alternative<Dependence::DepType>(Res))
    return std::get<Dependence::DepType>(Res);

2029

  auto &[Dist, MaxStride, CommonStride, ShouldRetryWithRuntimeCheck,
         TypeByteSize, AIsWrite, BIsWrite] =
      std::get<DepDistanceStrideAndSizeInfo>(Res);

2033 bool HasSameSize = TypeByteSize > 0;

2034

  if (isa<SCEVCouldNotCompute>(Dist)) {

2036

2037

2038 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;

2039 LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");

2041 }

2042

2045

2046

2047

2048

2049

2050

2053 *Dist, MaxStride, TypeByteSize))

2055

  const SCEVConstant *ConstDist = dyn_cast<SCEVConstant>(Dist);

2057

2058

2059 if (ConstDist) {

2061

2062

2063

2064 if (Distance > 0 && CommonStride && CommonStride > 1 && HasSameSize &&

2066 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");

2068 }

2069 } else {

2070 if (!LoopGuards)

2071 LoopGuards.emplace(

2074 }

2075

2076

2079 if (HasSameSize) {

2080

2082 }

2083 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "

2084 "different type sizes\n");

2086 }

2087

2088 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);

2089

2090

2091

2092

2093

2094

2095

2096

2098 if (!ConstDist) {

2099

2100

2101

2102

2103 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;

2105 }

2106 if (!HasSameSize ||

2107 couldPreventStoreLoadForward(

2110 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");

2112 }

2113 }

2114

2115 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");

2117 }

2118

2120

2121 if (MinDistance <= 0) {

2122 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;

2124 }

2125

2126 if (!ConstDist) {

2127

2128

2129

2130

2131

2132

2133

2134

2135 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;

2136 }

2137

2138 if (!HasSameSize) {

2139 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "

2140 "different type sizes\n");

2142 }

2143

2144 if (!CommonStride)

2146

2147

2152

2153 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

2154

2155

2156

2157

2158

2159

2160

2161

2162

2163

2164

2165

2166

2167

2168

2169

2170

2171

2172

2173

2174

2175

2176

2177

2178

2179

2180

2181

2182

2183

2184

2185 uint64_t MinDistanceNeeded =

2186 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;

2187 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {

2188 if (!ConstDist) {

2189

2190

2191

2192

2194 }

2195 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "

2196 << MinDistance << '\n');

2198 }

2199

2200

2201

2202 if (MinDistanceNeeded > MinDepDistBytes) {

2203 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "

2204 << MinDistanceNeeded << " size in bytes\n");

2206 }

2207

2208

2209

2210

2211

2212

2213

2214

2215

2216

2217

2218

2219

2220

2221

2222

2223

2224 MinDepDistBytes =

2225 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);

2226

2227 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);

2228 uint64_t MinDepDistBytesOld = MinDepDistBytes;

2230 couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {

2231

2232

2233 assert(MinDepDistBytes == MinDepDistBytesOld &&

2234 "An update to MinDepDistBytes requires an update to "

2235 "MaxSafeVectorWidthInBits");

2236 (void)MinDepDistBytesOld;

2238 }

2239

2240

2241

2242 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);

2243 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance

2244 << " with max VF = " << MaxVF << '\n');

2245

2246 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;

2247 if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {

2248

2249

2250

2252 }

2253

2254 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);

2256}

2257

2260

2261 MinDepDistBytes = -1;

2264 if (Visited.count(CurAccess))

2265 continue;

2266

2267

2270

2271

2276

2277

2278 while (AI != AE) {

2279 Visited.insert(*AI);

2280 bool AIIsWrite = AI->getInt();

2281

2282

2284 (AIIsWrite ? AI : std::next(AI));

2285 while (OI != AE) {

2286

      for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
           I1E = Accesses[*AI].end(); I1 != I1E; ++I1)

        for (std::vector<unsigned>::iterator
                 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
                 I2E = (OI == AI ? I1E : Accesses[*OI].end());
             I2 != I2E; ++I2) {

2295 auto A = std::make_pair(&*AI, *I1);

2296 auto B = std::make_pair(&*OI, *I2);

2297

2299 if (*I1 > *I2)

2301

2303 isDependent(*A.first, A.second, *B.first, B.second);

2305

2306

2307

2308

2309

2310 if (RecordDependences) {

2312 Dependences.emplace_back(A.second, B.second, Type);

2313

2315 RecordDependences = false;

2316 Dependences.clear();

2318 << "Too many dependences, stopped recording\n");

2319 }

2320 }

2322 return false;

2323 }

2324 ++OI;

2325 }

2326 ++AI;

2327 }

2328 }

2329

2330 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");

2332}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
  MemAccessInfo Access(Ptr, IsWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep",
    "Unknown",
    "IndirectUnsafe",
    "Forward",
    "ForwardButPreventsForwarding",
    "Backward",
    "BackwardVectorizable",
    "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
                    << TheLoop->getHeader()->getParent()->getName() << "' from "
                    << TheLoop->getLocStr() << "\n");

  // We can only analyze innermost loops.
  if (!TheLoop->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the symbolic max backedge taken
  // count, which is an upper bound on the number of loop iterations.
  const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
  if (isa<SCEVCouldNotCompute>(ExitCount)) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
                    << TheLoop->getHeader()->getName() << '\n');
  return true;
}

2401

2402bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,

2405

2409

2410

2411 unsigned NumReads = 0;

2412 unsigned NumReadWrites = 0;

2413

2414 bool HasComplexMemInst = false;

2415

2416

2417 HasConvergentOp = false;

2418

2419 PtrRtChecking->Pointers.clear();

2420 PtrRtChecking->Need = false;

2421

2423

2424 const bool EnableMemAccessVersioningOfLoop =

2427

2428

2429

2431 RPOT.perform(LI);

2433

2434

      if (auto *Call = dyn_cast<CallBase>(&I)) {

2437 if (Call->isConvergent())

2438 HasConvergentOp = true;

2439 }

2440

2441

2442

2443 if (HasComplexMemInst && HasConvergentOp)

2444 return false;

2445

2446

2447 if (HasComplexMemInst)

2448 continue;

2449

2450

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        for (Metadata *Op : Decl->getScopeList()->operands())
          LoopAliasScopes.insert(cast<MDNode>(Op));

2454

2455

2456

2457

      auto *Call = dyn_cast<CallInst>(&I);

2460 continue;

2461

2462

2463

2464

2465 if (I.mayReadFromMemory()) {

2466 auto hasPointerArgs = [](CallBase *CB) {

2467 return any_of(CB->args(), [](Value const *Arg) {

2468 return Arg->getType()->isPointerTy();

2469 });

2470 };

2471

2472

2473

2474

2475 if (Call && Call->isNoBuiltin() && Call->getCalledFunction() &&

2477 continue;

2478

        auto *Ld = dyn_cast<LoadInst>(&I);

2480 if (!Ld) {

2481 recordAnalysis("CantVectorizeInstruction", Ld)

2482 << "instruction cannot be vectorized";

2483 HasComplexMemInst = true;

2484 continue;

2485 }

2486 if (!Ld->isSimple() && !IsAnnotatedParallel) {

2487 recordAnalysis("NonSimpleLoad", Ld)

2488 << "read with atomic ordering or volatile read";

2489 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");

2490 HasComplexMemInst = true;

2491 continue;

2492 }

2493 NumLoads++;

2496 if (EnableMemAccessVersioningOfLoop)

2497 collectStridedAccess(Ld);

2498 continue;

2499 }

2500

2501

2502 if (I.mayWriteToMemory()) {

        auto *St = dyn_cast<StoreInst>(&I);

2504 if (!St) {

2505 recordAnalysis("CantVectorizeInstruction", St)

2506 << "instruction cannot be vectorized";

2507 HasComplexMemInst = true;

2508 continue;

2509 }

2510 if (!St->isSimple() && !IsAnnotatedParallel) {

2511 recordAnalysis("NonSimpleStore", St)

2512 << "write with atomic ordering or volatile write";

2513 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");

2514 HasComplexMemInst = true;

2515 continue;

2516 }

2517 NumStores++;

2520 if (EnableMemAccessVersioningOfLoop)

2521 collectStridedAccess(St);

2522 }

2523 }

2524 }

2525

2526 if (HasComplexMemInst)

2527 return false;

2528

2529

2530

2531

2532

2533

2534 if (!Stores.size()) {

2535 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");

2536 return true;

2537 }

2538

2540 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,

2541 LoopAliasScopes);

2542

2543

2544

2545

2546

2547

2549

2550

2551

2553

2555 Value *Ptr = ST->getPointerOperand();

2556

2557 if (isInvariant(Ptr)) {

2558

2559 StoresToInvariantAddresses.push_back(ST);

2560 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=

2561 !UniformStores.insert(Ptr).second;

2562 }

2563

2564

2565

2567 if (Seen.insert({Ptr, AccessTy}).second) {

2568 ++NumReadWrites;

2569

2571

2572

2573

2574 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))

2576

2578 [&Accesses, AccessTy, Loc](Value *Ptr) {

2579 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);

2580 Accesses.addStore(NewLoc, AccessTy);

2581 });

2582 }

2583 }

2584

2585 if (IsAnnotatedParallel) {

2587 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "

2588 << "checks.\n");

2589 return true;

2590 }

2591

2592 for (LoadInst *LD : Loads) {

2593 Value *Ptr = LD->getPointerOperand();

2594

2595

2596

2597

2598

2599

2600

2601

2602 bool IsReadOnlyPtr = false;

2604 if (Seen.insert({Ptr, AccessTy}).second ||

2605 getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {

2606 ++NumReads;

2607 IsReadOnlyPtr = true;

2608 }

2609

2610

2611

2612 if (UniformStores.count(Ptr)) {

2613 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "

2614 "load and uniform store to the same address!\n");

2615 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;

2616 }

2617

2619

2620

2621

2622 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))

2624

2626 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {

2627 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);

2628 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);

2629 });

2630 }

2631

2632

2633

2634 if (NumReadWrites == 1 && NumReads == 0) {

2635 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");

2636 return true;

2637 }

2638

2639

2640

2641 Accesses.buildDependenceSets();

2642

2643

2644

2645 Value *UncomputablePtr = nullptr;

2646 bool CanDoRTIfNeeded =

2647 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,

2648 SymbolicStrides, UncomputablePtr, false);

2649 if (!CanDoRTIfNeeded) {

    const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);

2651 recordAnalysis("CantIdentifyArrayBounds", I)

2652 << "cannot identify array bounds";

2653 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "

2654 << "the array bounds.\n");

2655 return false;

2656 }

2657

2659 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");

2660

2661 bool DepsAreSafe = true;

2662 if (Accesses.isDependencyCheckNeeded()) {

2663 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");

2664 DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,

2665 Accesses.getDependenciesToCheck());

2666

2668 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

2669

2670

2671 Accesses.resetDepChecks(*DepChecker);

2672

2673 PtrRtChecking->reset();

2674 PtrRtChecking->Need = true;

2675

2676 auto *SE = PSE->getSE();

2677 UncomputablePtr = nullptr;

2678 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(

2679 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);

2680

2681

2682 if (!CanDoRTIfNeeded) {

        auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);

2684 recordAnalysis("CantCheckMemDepsAtRunTime", I)

2685 << "cannot check memory dependencies at runtime";

2686 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");

2687 return false;

2688 }

2689 DepsAreSafe = true;

2690 }

2691 }

2692

2693 if (HasConvergentOp) {

2694 recordAnalysis("CantInsertRuntimeCheckWithConvergent")

2695 << "cannot add control dependency to convergent operation";

2696 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "

2697 "would be needed with a convergent operation\n");

2698 return false;

2699 }

2700

2701 if (DepsAreSafe) {

2703 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"

2704 << (PtrRtChecking->Need ? "" : " don't")

2705 << " need runtime memory checks.\n");

2706 return true;

2707 }

2708

2709 emitUnsafeDependenceRemark();

2710 return false;

2711}

2712

2713void LoopAccessInfo::emitUnsafeDependenceRemark() {

2714 const auto *Deps = getDepChecker().getDependences();

2715 if (!Deps)

2716 return;

2717 const auto *Found =

2721 });

2722 if (Found == Deps->end())

2723 return;

2725

2726 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");

2727

2728

2729 bool HasForcedDistribution = false;

2730 std::optional<const MDOperand *> Value =

    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();

2736 }

2737

2738 const std::string Info =

2739 HasForcedDistribution

2740 ? "unsafe dependent memory operations in loop."

2741 : "unsafe dependent memory operations in loop. Use "

2742 "#pragma clang loop distribute(enable) to allow loop distribution "

2743 "to attempt to isolate the offending operations into a separate "

2744 "loop";

2746 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;

2747

2748 switch (Dep.Type) {

2754 R << "\nBackward loop carried data dependence.";

2755 break;

2757 R << "\nForward loop carried data dependence that prevents "

2758 "store-to-load forwarding.";

2759 break;

2761 R << "\nBackward loop carried data dependence that prevents "

2762 "store-to-load forwarding.";

2763 break;

2765 R << "\nUnsafe indirect dependence.";

2766 break;

2768 R << "\nUnknown data dependence.";

2769 break;

2770 }

2771

2773 DebugLoc SourceLoc = I->getDebugLoc();

2775 SourceLoc = DD->getDebugLoc();

2776 if (SourceLoc)

2777 R << " Memory location is the same as accessed at "

2778 << ore::NV("Location", SourceLoc);

2779 }

2780}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  const BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

OptimizationRemarkAnalysis &
LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
  assert(!Report && "Multiple reports generated");

  const Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isInvariant(Value *V) const {
  auto *SE = PSE->getSE();
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered invariant.
  if (!SE->isSCEVable(V->getType()))
    return false;
  const SCEV *S = SE->getSCEV(V);
  return SE->isLoopInvariant(S, TheLoop);
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP
    // we can peel off the zero index.
    TypeSize ElemSize = GEPTI.isStruct()
                            ? DL.getTypeAllocSize(GEPTI.getIndexedType())
                            : GEPTI.getSequentialElementStride(DL);
    if (ElemSize != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
    if (I != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
                                        Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually index at
  // this point) easier analyzable. If OrigPtr is equal to Ptr we are
  // analyzing the pointer, otherwise, we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  // If the pointer is invariant then there is no stride and it makes no
  // sense to add predicates.
  if (Lp != S->getLoop())
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing
  // the pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Note that the restriction after this loop invariant check are only
  // profitability restrictions.
  if (!SE->isLoopInvariant(V, Lp))
    return nullptr;

  // Look for the loop invariant symbolic value.
  if (isa<SCEVUnknown>(V))
    return V;

  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
    if (isa<SCEVUnknown>(C->getOperand()))
      return V;

  return nullptr;
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = getLoadStorePointerOperand(MemAccess);
  if (!Ptr)
    return;

  // Note: getStrideFromPointer is a *profitability* heuristic.  We
  // could broaden the scope of values returned here - to anything
  // which happens to be loop invariant and contributes to the
  // computation of the stride - rather than assuming it is the
  // stride.
  const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!StrideExpr)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");

  if (!SpeculateUnitStride) {
    LLVM_DEBUG(dbgs() << "  Chose not to due to -laa-speculate-unit-stride\n");
    return;
  }

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
  // or zero iteration loop, as Trip-Count <= Stride == 1.
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense that the
  // users of this analysis (such as the vectorizer) will trigger it, based on
  // their specific cost considerations; For example, in cases where stride
  // versioning does not help resolving memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).

  const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();

  // Match the types so we can compare the stride and the MaxBTC.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
  const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
  uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
  uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = MaxBTC;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSizeBits >= StrideTypeSizeBits)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
  const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - MaxBTC > 0
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  // Strip back off the integer cast, and check that our result is a
  // SCEVUnknown as we expect.
  const SCEV *StrideBase = StrideExpr;
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
    StrideBase = C->getOperand();
  SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetTransformInfo *TTI,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(nullptr), TheLoop(L) {
  unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  if (TTI) {
    TypeSize FixedWidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
    if (FixedWidth.isNonZero()) {
      // Scale the vector width by 2 as rough estimate to also consider
      // interleaving.
      MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
    }

    TypeSize ScalableWidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_ScalableVector);
    if (ScalableWidth.isNonZero())
      MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  }
  DepChecker = std::make_unique<MemoryDepChecker>(*PSE, L, SymbolicStrides,
                                                  MaxTargetVectorWidthInBits);
  PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
  if (canAnalyzeLoop())
    CanVecMem = analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    const MemoryDepChecker &DC = getDepChecker();
    if (!DC.isSafeForAnyVectorWidth())
      OS << " with a maximum safe vector width of "
         << DC.getMaxSafeVectorWidthInBits() << " bits";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (const auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pair of accesses need run-time checks to prove independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth)
      << "Non vectorizable stores to invariant address were "
      << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
                  HasLoadStoreDependenceInvolvingLoopInvariantAddress
              ? ""
              : "not ")
      << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
  const auto &[It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});

  if (Inserted)
    It->second =
        std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);

  return *It->second;
}

void LoopAccessInfoManager::clear() {
  // Collect LoopAccessInfo entries that may keep references to IR outside the
  // analyzed loop or SCEVs that may have been modified or invalidated. At the
  // moment, that is loops requiring memory or SCEV runtime checks, as those
  // cache SCEVs, e.g. for pointer expressions.
  SmallVector<Loop *> ToRemove;
  for (const auto &[L, LAI] : LoopAccessInfoMap) {
    if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
        LAI->getPSE().getPredicate().isAlwaysTrue())
      continue;
    ToRemove.push_back(L);
  }

  for (Loop *L : ToRemove)
    LoopAccessInfoMap.erase(L);
}

bool LoopAccessInfoManager::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<LoopAccessAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  // Skip checking TargetLibraryAnalysis as it is immutable and can't become
  // invalid.
  return Inv.invalidate<AAManager>(F, PA) ||
         Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA);
}

LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
                                              FunctionAnalysisManager &FAM) {
  auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
  auto &AA = FAM.getResult<AAManager>(F);
  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = FAM.getResult<LoopAnalysis>(F);
  auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
  auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
  return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
}
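// Minimal usage sketch (illustrative; `MyPass` is a placeholder, the queried
// APIs are the ones defined in this file and its header):
//
//   #include "llvm/Analysis/LoopAccessAnalysis.h"
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     // The analysis result is a manager that lazily computes per-loop info.
//     LoopAccessInfoManager &LAIs = AM.getResult<LoopAccessAnalysis>(F);
//     LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
//     for (Loop *L : LI) {
//       const LoopAccessInfo &LAI = LAIs.getInfo(*L);
//       if (LAI.getRuntimePointerChecking()->Need)
//         ; // runtime memory checks would be required to vectorize this loop
//     }
//     return PreservedAnalyses::all();
//   }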

3142

This file implements a class to represent arbitrary precision integral constant values and operations...

ReachingDefAnalysis InstSet & ToRemove

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")

Analysis containing CSE Info

This file contains the declarations for the subclasses of Constant, which represent the different fla...

Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx

This file defines the DenseMap class.

Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...

This header defines various interfaces for pass management in LLVM.

static std::pair< const SCEV *, const SCEV * > getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, PredicatedScalarEvolution &PSE, DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > &PointerBounds)

Calculate Start and End points of memory access.

static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))

We collect dependences up to this threshold.

static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))

Enable store-to-load forwarding conflict detection.

static void findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr, SmallVectorImpl< PointerIntPair< const SCEV *, 1, bool > > &ScevList, unsigned Depth)

static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr, const SCEV *PtrScev, Loop *L, bool Assume)

Check whether a pointer can participate in a runtime bounds check.

static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))

The maximum iterations used to merge memory checks.

static const SCEV * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)

Get the stride of a pointer access in a loop.

static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep)

Find the operand of the GEP that should be checked for consecutive stores.

static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))

static bool isNoWrap(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &Strides, Value *Ptr, Type *AccessTy, Loop *L, bool Assume)

Check whether a pointer address cannot wrap.

static cl::opt< bool, true > HoistRuntimeChecks("hoist-runtime-checks", cl::Hidden, cl::desc("Hoist inner loop runtime memory checks to outer loop if possible"), cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true))

static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))

static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &MaxBTC, const SCEV &Dist, uint64_t MaxStride, uint64_t TypeByteSize)

Given a dependence-distance Dist between two memory accesses, that have strides in the same direction...

static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))

static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)

static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)

Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.

static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)

If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.

static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)

Check the dependence for two accesses with the same stride Stride.

static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)

Compare I and J and return the minimum.

static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))

static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))

static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)

static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))

This enables versioning on the strides of symbolically striding memory accesses in code like the following.
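
A hedged sketch of what this versioning looks like; the names N, A, B, and Stride are illustrative only:

    // A loop with a symbolic stride that LAA can version:
    for (int i = 0; i < N; ++i)
      A[i * Stride] += B[i];

    // Conceptually rewritten so the common unit-stride case is vectorizable:
    if (Stride == 1) {
      for (int i = 0; i < N; ++i) // stride proven to be 1
        A[i] += B[i];
    } else {
      for (int i = 0; i < N; ++i) // fallback: original strided loop
        A[i * Stride] += B[i];
    }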

This header provides classes for managing per-loop analyses.

This file provides utility analysis objects describing memory locations.

This file defines the PointerIntPair class.

This file implements a set that has insertion order iteration characteristics.

This file defines the SmallPtrSet class.

This file defines the SmallSet class.

This file defines the SmallVector class.

This pass exposes codegen information to IR-level passes.

A manager for alias analyses.

Class for arbitrary precision integers.

uint64_t getZExtValue() const

Get zero extended value.

APInt abs() const

Get the absolute value.

unsigned getBitWidth() const

Return the number of bits in the APInt.

APInt sextOrTrunc(unsigned width) const

Sign extend or truncate to width.

int64_t getSExtValue() const

Get sign extended value.
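
A minimal sketch of the APInt calls listed above; assumes an LLVM development environment:

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    int64_t demoAPInt() {
      APInt Dist(64, uint64_t(-24), /*isSigned=*/true); // a distance of -24
      APInt Mag = Dist.abs();                           // magnitude: 24
      unsigned Bits = Mag.getBitWidth();                // 64
      (void)Bits;
      APInt Narrow = Mag.sextOrTrunc(32);               // fit into 32 bits
      return Narrow.getSExtValue();                     // 24
    }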

This templated class represents "all analyses that operate over <IRUnitT>" (e....

API to communicate dependencies between analyses during invalidation.

bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)

Trigger the invalidation of some other analysis pass if not already handled and return whether it was...

A container for analyses that lazily runs them and caches their results.

PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)

Get the result of an analysis pass for a given IR unit.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

LLVM Basic Block Representation.

const Function * getParent() const

Return the enclosing method, or null if none.

const DataLayout & getDataLayout() const

Get the data layout of the module this basic block belongs to.

This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...

Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to callin...

@ ICMP_ULE

unsigned less or equal

This class represents an Operation in the Expression.

A parsed version of the target data layout string and methods for querying it.

iterator find(const_arg_type_t< KeyT > Val)

Analysis pass which computes a DominatorTree.

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

bool dominates(const BasicBlock *BB, const Use &U) const

Return true if the (end of the) basic block BB dominates the use U.

EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...

iterator findValue(const ElemTy &V) const

findValue - Return an iterator to the specified value.

iterator insert(const ElemTy &Data)

insert - Insert a new value into the union/find set, ignoring the request if the value already exists...

member_iterator member_end() const

typename std::set< ECValue, ECValueComparator >::const_iterator iterator

iterator* - Provides a way to iterate over all values in the set.

member_iterator member_begin(iterator I) const

member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)

union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...

const ElemTy & getLeaderValue(const ElemTy &V) const

getLeaderValue - Return the leader for the specified value that is in the set.
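
A minimal sketch of the EquivalenceClasses interface above, in the spirit of how LAA groups accesses that must share a dependence set (element type is illustrative):

    #include "llvm/ADT/EquivalenceClasses.h"
    using namespace llvm;

    void demoEquivalenceClasses() {
      EquivalenceClasses<int> EC;
      EC.insert(1);
      EC.insert(2);
      EC.unionSets(1, 2);                // merge the two classes
      int Leader = EC.getLeaderValue(2); // same leader as for 1
      (void)Leader;
    }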

bool hasOptSize() const

Optimize this function for size (-Os) or minimum size (-Oz).

An instruction for type-safe pointer arithmetic to access elements of arrays and structs.

Type * getResultElementType() const

PointerType * getType() const

Global values are always pointers.

const DataLayout & getDataLayout() const

Get the data layout of the module this instruction belongs to.

Class to represent integer types.

static IntegerType * get(LLVMContext &C, unsigned NumBits)

This static method is the primary way of constructing an IntegerType.

An instruction for reading from memory.

Value * getPointerOperand()

static constexpr LocationSize beforeOrAfterPointer()

Any location before or after the base pointer (but still within the underlying object).

This analysis provides dependence information for the memory accesses of a loop.

Result run(Function &F, FunctionAnalysisManager &AM)

bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)

const LoopAccessInfo & getInfo(Loop &L)

Drive the analysis of memory accesses in the loop.

const MemoryDepChecker & getDepChecker() const

The Memory Dependence Checker, which can determine the loop-independent and loop-carried dependences b...

bool isInvariant(Value *V) const

Returns true if value V is loop invariant.

void print(raw_ostream &OS, unsigned Depth=0) const

Print the information about the memory accesses in the loop.

static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)

Return true if the block BB needs to be predicated in order for the loop to be vectorized.

LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)

Analysis pass that exposes the LoopInfo for a function.

bool contains(const LoopT *L) const

Return true if the specified loop is contained within this loop.

BlockT * getLoopLatch() const

If there is a single latch block for this loop, return it.

bool isInnermost() const

Return true if the loop does not contain any (natural) loops.

unsigned getNumBackEdges() const

Calculate the number of back edges to the loop header.

BlockT * getHeader() const

LoopT * getParentLoop() const

Return the parent loop if it exists or nullptr for top level loops.

Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...

Represents a single loop in the control flow graph.

std::string getLocStr() const

Return a string containing the debug location of the loop (file name + line number if present,...

bool isAnnotatedParallel() const

Returns true if the loop is annotated parallel.

DebugLoc getStartLoc() const

Return the debug location of the start of this loop.

ArrayRef< MDOperand > operands() const

Tracking metadata reference owned by Metadata.

This class implements a map that also provides access to all stored values in a deterministic order.

Checks memory dependences among accesses to the same underlying object to determine whether there vec...

ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const

Return the program order indices for the access location (Ptr, IsWrite).

bool isSafeForAnyVectorWidth() const

Return true if the number of elements that are safe to operate on simultaneously is not bounded.

bool areDepsSafe(const DepCandidates &AccessSets, const MemAccessInfoList &CheckDeps)

Check whether the dependencies between the accesses are safe.

const SmallVectorImpl< Instruction * > & getMemoryInstructions() const

The vector of memory access instructions.

const Loop * getInnermostLoop() const

uint64_t getMaxSafeVectorWidthInBits() const

Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...

bool isSafeForVectorization() const

No memory dependence was encountered that would inhibit vectorization.

const SmallVectorImpl< Dependence > * getDependences() const

Returns the memory dependences.

DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > & getPointerBounds()

SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const

Find the set of instructions that read or write via Ptr.

VectorizationSafetyStatus

Type to keep track of the status of the dependence check.

@ PossiblySafeWithRtChecks

bool shouldRetryWithRuntimeCheck() const

In some cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...

void addAccess(StoreInst *SI)

Register the location (instructions are given increasing numbers) of a write access.

PointerIntPair< Value *, 1, bool > MemAccessInfo
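
A sketch of how this PointerIntPair packs an is-write flag into the low bit of a pointer; demoMemAccessInfo is a hypothetical name:

    #include "llvm/ADT/PointerIntPair.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;

    void demoMemAccessInfo(Value *Ptr) {
      PointerIntPair<Value *, 1, bool> Access(Ptr, /*IsWrite=*/true);
      Value *P = Access.getPointer(); // the accessed pointer
      bool IsWrite = Access.getInt(); // true for a store
      (void)P; (void)IsWrite;
    }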

Representation for a specific memory location.

static MemoryLocation get(const LoadInst *LI)

Return a location with information about the memory reference by the given instruction.

LocationSize Size

The maximum size of the location, in address-units, or UnknownSize if the size is not known.

AAMDNodes AATags

The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...

const Value * Ptr

The address of the start of the location.
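
A minimal sketch of building a MemoryLocation for a load, using the members listed above:

    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    const Value *demoMemoryLocation(LoadInst *LI) {
      MemoryLocation Loc = MemoryLocation::get(LI); // pointer, size, AA tags
      return Loc.Ptr;                               // start address
    }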

An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...

void addPredicate(const SCEVPredicate &Pred)

Adds a new predicate.

ScalarEvolution * getSE() const

Returns the ScalarEvolution analysis used.

const SCEVPredicate & getPredicate() const

bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)

Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.

void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)

Proves that V doesn't overflow by adding SCEV predicate.

void print(raw_ostream &OS, unsigned Depth) const

Print the SCEV mappings done by the Predicated Scalar Evolution.

const SCEVAddRecExpr * getAsAddRec(Value *V)

Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.

const SCEV * getSymbolicMaxBackedgeTakenCount()

Get the (predicated) symbolic max backedge count for the analyzed loop.

const SCEV * getSCEV(Value *V)

Returns the SCEV expression of V, in the context of the current SCEV predicate.

A set of analyses that are preserved following a run of a transformation pass.

PreservedAnalysisChecker getChecker() const

Build a checker for this PreservedAnalyses and the specified analysis type.

Holds information about the memory runtime legality checks to verify that a group of pointers do not ...

bool Need

This flag indicates if we need to add the runtime check.

void reset()

Reset the state of the pointer runtime information.

unsigned getNumberOfChecks() const

Returns the number of run-time checks required according to needsChecking.

void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const

Print Checks.

bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const

Decide if we need to add a check between two groups of pointers, according to needsChecking.

void print(raw_ostream &OS, unsigned Depth=0) const

Print the list of run-time memory checks necessary.

SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups

Holds a partitioning of pointers into "check groups".

void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)

Generate the checks and store them.

static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)

Check if pointers are in the same partition.

SmallVector< PointerInfo, 2 > Pointers

Information about the pointers that may require checking.

void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)

Insert a pointer and calculate the start and end SCEVs.

This node represents a polynomial recurrence on the trip count of the specified loop.

const SCEV * getStepRecurrence(ScalarEvolution &SE) const

Constructs and returns the recurrence indicating how much this expression steps by.

bool isAffine() const

Return true if this represents an expression A + B*x where A and B are loop invariant values.

const Loop * getLoop() const

This class represents a constant integer value.

const APInt & getAPInt() const

This is the base class for unary integral cast operator classes.

This node represents multiplication of some number of SCEVs.

NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const

virtual void print(raw_ostream &OS, unsigned Depth=0) const =0

Prints a textual representation of this predicate with an indentation of Depth.

This class represents an analyzed expression in the program.

Type * getType() const

Return the LLVM type of this SCEV expression.

Analysis pass that exposes the ScalarEvolution for a function.

static LoopGuards collect(const Loop *L, ScalarEvolution &SE)

Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...

The main scalar evolution driver.

bool isKnownNonNegative(const SCEV *S)

Test if the given expression is known to be non-negative.

const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)

Return the SCEV object corresponding to -V.

bool isKnownNonPositive(const SCEV *S)

Test if the given expression is known to be non-positive.

const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)

const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)

const SCEV * getConstant(ConstantInt *V)

const SCEV * getSCEV(Value *V)

Return a SCEV expression for the full generality of the specified expression.

const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)

Return a SCEV corresponding to a conversion of the input value to the specified type.

const SCEV * getOne(Type *Ty)

Return a SCEV for the constant 1 of a specific type.

const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)

bool isLoopInvariant(const SCEV *S, const Loop *L)

Return true if the value of the given SCEV is unchanging in the specified loop.

bool isKnownPositive(const SCEV *S)

Test if the given expression is known to be positive.

const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)

bool isSCEVable(Type *Ty) const

Test if values of the given type are analyzable within the SCEV framework.

Type * getEffectiveSCEVType(Type *Ty) const

Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...

const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)

APInt getSignedRangeMin(const SCEV *S)

Determine the min of the signed range for a particular SCEV.

const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)

Return an expression for the store size of StoreTy that is type IntTy.

const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)

Return LHS-RHS.

const SCEV * getCouldNotCompute()

const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)

Try to apply information from loop guards for L to Expr.

const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)

Get a canonical multiply expression, or something simpler if possible.

const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)

Return an expression for a TypeSize.

std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)

Compute LHS - RHS and return the result as an APInt if it is a constant, and std::nullopt if it isn'...

const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)

Get a canonical add expression, or something simpler if possible.

const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)

Return a SCEV corresponding to a conversion of the input value to the specified type.

bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)

Test if the given expression is known to satisfy the condition described by Pred, LHS,...
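
A hedged sketch combining two of the ScalarEvolution queries above; knownNonNegativeDistance is a hypothetical helper, not the file's actual logic:

    #include "llvm/Analysis/ScalarEvolution.h"
    using namespace llvm;

    // True if SE can prove Sink - Src >= 0, i.e. Src does not lie past Sink.
    static bool knownNonNegativeDistance(ScalarEvolution &SE, const SCEV *Src,
                                         const SCEV *Sink) {
      const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
      return SE.isKnownNonNegative(Dist);
    }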

A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...

size_type count(const T &V) const

count - Return 1 if the element is in the set, 0 otherwise.

std::pair< const_iterator, bool > insert(const T &V)

insert - Insert an element into the set if it isn't already there.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

reference emplace_back(ArgTypes &&... Args)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
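
A minimal sketch of the small-container APIs listed above; assumes an LLVM development environment:

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/ADT/SmallVector.h"
    using namespace llvm;

    void demoSmallContainers() {
      SmallVector<int, 4> Vec; // inline storage for up to 4 elements
      Vec.push_back(1);
      Vec.emplace_back(2);
      SmallPtrSet<int *, 8> Seen; // fast set for small numbers of pointers
      Seen.insert(&Vec[0]);
      bool Present = Seen.count(&Vec[0]) == 1;
      (void)Present;
    }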

An instruction for storing to memory.

StringRef - Represent a constant reference to a string, i.e.

Analysis pass providing the TargetTransformInfo.

Analysis pass providing the TargetLibraryInfo.

Provides information about what library functions are available for the current target.

This pass provides access to the codegen interfaces that are needed for IR-level transformations.

TypeSize getRegisterBitWidth(RegisterKind K) const

The instances of the Type class are immutable: once they are created, they are never changed.

bool isVectorTy() const

True if this is an instance of VectorType.

bool isPointerTy() const

True if this is an instance of PointerType.

unsigned getPointerAddressSpace() const

Get the address space of this pointer or pointer vector type.

A Use represents the edge between a Value definition and its users.

Value * getOperand(unsigned i) const

unsigned getNumOperands() const

static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)

Retrieve all the VFInfo instances associated to the CallInst CI.

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const

Accumulate the constant offset this value has compared to a base pointer.

StringRef getName() const

Return a constant reference to the value's name.

constexpr ScalarTy getFixedValue() const

constexpr bool isNonZero() const

An efficient, type-erasing, non-owning reference to a callable.

TypeSize getSequentialElementStride(const DataLayout &DL) const

Type * getIndexedType() const

This class implements an extremely fast bulk output stream that can only output to a stream.

raw_ostream & indent(unsigned NumSpaces)

indent - Insert 'NumSpaces' spaces.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

bool match(Val *V, const Pattern &P)

is_zero m_Zero()

Match any null constant or a vector with all elements equal to 0.

initializer< Ty > init(const Ty &Val)

LocationClass< Ty > location(Ty &L)

DiagnosticInfoOptimizationBase::Argument NV

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)

Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
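
A hedged usage sketch of getPointersDiff with the signature shown above; pointersAreAdjacent is a hypothetical helper:

    #include "llvm/Analysis/LoopAccessAnalysis.h"
    #include <optional>
    using namespace llvm;

    // True if B is exactly one element of ElemTy past A.
    static bool pointersAreAdjacent(Type *ElemTy, Value *A, Value *B,
                                    const DataLayout &DL, ScalarEvolution &SE) {
      std::optional<int> Diff = getPointersDiff(ElemTy, A, ElemTy, B, DL, SE);
      return Diff && *Diff == 1;
    }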

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)

Returns intrinsic ID for call.

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...

unsigned getPointerAddressSpace(const Type *T)

std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)

Find string metadata for loop.

const Value * getLoadStorePointerOperand(const Value *V)

A helper function that returns the pointer operand of a load or store instruction.

const Value * getPointerOperand(const Value *V)

A helper function that returns the pointer operand of a load, store or GEP instruction.

OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)

Wrapper function around std::transform to apply a function to a range and store the result elsewhere.

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

bool NullPointerIsDefined(const Function *F, unsigned AS=0)

Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool isPointerTy(const Type *T)

std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)

If the pointer has a constant stride return it in units of the access type size.
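
A hedged usage sketch of getPtrStride as declared above; isUnitStride is a hypothetical helper:

    #include "llvm/Analysis/LoopAccessAnalysis.h"
    #include <optional>
    using namespace llvm;

    // True if Ptr advances by exactly one AccessTy element per iteration
    // of Lp, forward or backward.
    static bool isUnitStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
                             Value *Ptr, const Loop *Lp) {
      std::optional<int64_t> Stride = getPtrStride(PSE, AccessTy, Ptr, Lp);
      return Stride && (*Stride == 1 || *Stride == -1);
    }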

bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)

Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...

const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)

Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...

bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)

Returns true if the memory operations A and B are consecutive.

bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)

Return true if this function can prove that V does not have undef bits and is never poison.

OutputIt copy(R &&Range, OutputIt Out)

auto find_if(R &&Range, UnaryPredicate P)

Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.

gep_type_iterator gep_type_begin(const User *GEP)

void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)

This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
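
A minimal sketch of calling getUnderlyingObjects with its defaults; collectObjects is a hypothetical wrapper:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    static void collectObjects(const Value *Ptr,
                               SmallVectorImpl<const Value *> &Objects) {
      // Walks through phis and selects; defaults: no LoopInfo, MaxLookup = 6.
      getUnderlyingObjects(Ptr, Objects);
    }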

Type * getLoadStoreType(const Value *I)

A helper function that returns the type of a load or store instruction.
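
A minimal sketch pairing the two load/store helpers listed here; accessTypeAndPtr is a hypothetical name:

    #include "llvm/IR/Instructions.h"
    #include <utility>
    using namespace llvm;

    // The accessed type and address of a load or store, or null otherwise.
    static std::pair<Type *, const Value *> accessTypeAndPtr(const Value *I) {
      return {getLoadStoreType(I), getLoadStorePointerOperand(I)};
    }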

IR Values for the lower and upper bounds of a pointer evolution.

MDNode * Scope

The tag for alias scope specification (used with noalias).

MDNode * TBAA

The tag for type-based alias analysis.

MDNode * NoAlias

The tag specifying the noalias scope.

A special type used by analysis passes to provide an address that identifies that particular analysis...

Dependence between memory access instructions.

Instruction * getDestination(const MemoryDepChecker &DepChecker) const

Return the destination instruction of the dependence.

DepType Type

The type of the dependence.

bool isPossiblyBackward() const

May be a lexically backward dependence type (includes Unknown).

Instruction * getSource(const MemoryDepChecker &DepChecker) const

Return the source instruction of the dependence.

bool isForward() const

Lexically forward dependence.

bool isBackward() const

Lexically backward dependence.

void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const

Print the dependence.

DepType

The type of the dependence.

@ BackwardVectorizableButPreventsForwarding

@ ForwardButPreventsForwarding

static const char * DepName[]

String version of the types.

static VectorizationSafetyStatus isSafeForVectorization(DepType Type)

Dependence types that don't prevent vectorization.

unsigned AddressSpace

Address space of the involved pointers.

bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck)

Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.

bool NeedsFreeze

Whether the pointer needs to be frozen after expansion, e.g.

RuntimeCheckingPtrGroup(unsigned Index, const RuntimePointerChecking &RtCheck)

Create a new pointer checking group containing a single pointer, with index Index in RtCheck.

const SCEV * High

The SCEV expression which represents the upper bound of all the pointers in this group.

SmallVector< unsigned, 2 > Members

Indices of all the pointers that constitute this grouping.

const SCEV * Low

The SCEV expression which represents the lower bound of all the pointers in this group.

bool IsWritePtr

Holds the information if this pointer is used for writing to memory.

unsigned DependencySetId

Holds the id of the set of pointers that could be dependent because of a shared underlying object.

unsigned AliasSetId

Holds the id of the disjoint alias set to which this pointer belongs.

static const unsigned MaxVectorWidth

Maximum SIMD width.

static unsigned VectorizationFactor

VF as overridden by the user.

static unsigned RuntimeMemoryCheckThreshold

When performing memory disambiguation checks at runtime do not make more than this number of compari...

static bool isInterleaveForced()

True if force-vector-interleave was specified by the user.

static unsigned VectorizationInterleave

Interleave factor as overridden by the user.

static bool HoistRuntimeChecks

Function object to check whether the first component of a container supported by std::get (like std::...