LLVM: lib/Analysis/MemorySSA.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

30#include "llvm/Config/llvm-config.h"

53#include

54#include

55#include

56#include

57#include

58

59using namespace llvm;

60

61#define DEBUG_TYPE "memoryssa"

62

67

69 true)

74

76 "memssa-check-limit", cl::Hidden, cl::init(100),

77 cl::desc("The maximum number of stores/phis MemorySSA"

78 "will consider trying to walk past (default = 100)"));

79

80

81#ifdef EXPENSIVE_CHECKS

83#else

85#endif

86

90

92

93namespace {

94

95

96

99

100public:

101 MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

102

103 void emitBasicBlockStartAnnot(const BasicBlock *BB,

106 OS << "; " << *MA << "\n";

107 }

108

109 void emitInstructionAnnot(const Instruction *I,

112 OS << "; " << *MA << "\n";

113 }

114};

115

116

117

122

123public:

124 MemorySSAWalkerAnnotatedWriter(MemorySSA *M)

125 : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

126

127 void emitBasicBlockStartAnnot(const BasicBlock *BB,

130 OS << "; " << *MA << "\n";

131 }

132

133 void emitInstructionAnnot(const Instruction *I,

137 OS << "; " << *MA;

138 if (Clobber) {

139 OS << " - clobbered by ";

142 else

143 OS << *Clobber;

144 }

145 OS << "\n";

146 }

147 }

148};

149

150}

151

152namespace {

153

154

155

156

157

158

159class MemoryLocOrCall {

160public:

161 bool IsCall = false;

162

163 MemoryLocOrCall(MemoryUseOrDef *MUD)

164 : MemoryLocOrCall(MUD->getMemoryInst()) {}

165 MemoryLocOrCall(const MemoryUseOrDef *MUD)

166 : MemoryLocOrCall(MUD->getMemoryInst()) {}

167

168 MemoryLocOrCall(Instruction *Inst) {

170 IsCall = true;

172 } else {

173 IsCall = false;

174

175

178 }

179 }

180

181 explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

182

183 const CallBase *getCall() const {

186 }

187

188 MemoryLocation getLoc() const {

190 return Loc;

191 }

192

194 if (IsCall != Other.IsCall)

195 return false;

196

197 if (!IsCall)

198 return Loc == Other.Loc;

199

201 return false;

202

205 Other.Call->arg_begin());

206 }

207

208private:

209 union {

210 const CallBase *Call;

211 MemoryLocation Loc;

212 };

213};

214

215}

216

217namespace llvm {

218

223

227

228 static unsigned getHashValue(const MemoryLocOrCall &MLOC) {

229 if (!MLOC.IsCall)

231 MLOC.IsCall,

233

236 MLOC.getCall()->getCalledOperand()));

237

238 for (const Value *Arg : MLOC.getCall()->args())

240 return hash;

241 }

242

243 static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {

244 return LHS == RHS;

245 }

246};

247

248}

249

250

251

252

253

254

255

257 const LoadInst *MayClobber) {

258 bool VolatileUse = Use->isVolatile();

259 bool VolatileClobber = MayClobber->isVolatile();

260

261 if (VolatileUse && VolatileClobber)

262 return false;

263

264

265

266

267

268

269

270

271

272

273

277 return !(SeqCstUse || MayClobberIsAcquire);

278}

279

280template

281static bool

283 const Instruction *UseInst, AliasAnalysisType &AA) {

285 assert(DefInst && "Defining instruction not actually an instruction");

286

288

289

290

291

292

293

294

295 switch (II->getIntrinsicID()) {

296 case Intrinsic::allow_runtime_check:

297 case Intrinsic::allow_ubsan_check:

298 case Intrinsic::invariant_start:

299 case Intrinsic::invariant_end:

300 case Intrinsic::assume:

301 case Intrinsic::experimental_noalias_scope_decl:

302 case Intrinsic::pseudoprobe:

303 return false;

304 case Intrinsic::dbg_declare:

305 case Intrinsic::dbg_label:

306 case Intrinsic::dbg_value:

307 llvm_unreachable("debuginfo shouldn't have associated defs!");

308 default:

309 break;

310 }

311 }

312

316 }

317

321

322 ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);

324}

325

326template

328 const MemoryLocOrCall &UseMLOC,

329 AliasAnalysisType &AA) {

330

331

332 if (UseMLOC.IsCall)

337}

338

339

344

345namespace {

346

347struct UpwardsMemoryQuery {

348

349 bool IsCall = false;

350

351

353

355

356 const MemoryAccess *OriginalAccess = nullptr;

357 bool SkipSelfAccess = false;

358

359 UpwardsMemoryQuery() = default;

360

362 : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {

363 if (!IsCall)

365 }

366};

367

368}

369

370template

373

374

376 return I->hasMetadata(LLVMContext::MD_invariant_load) ||

378 }

379 return false;

380}

381

382

383

384

385

386

387

388

389

390

391

392

393

394

395

396[[maybe_unused]] static void

400 bool AllowImpreciseClobber = false) {

401 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

402

405 "liveOnEntry must clobber itself");

406 return;

407 }

408

409 bool FoundClobber = false;

413

414

415 while (!Worklist.empty()) {

417

418

419 if (!VisitedPhis.insert(MAP).second)

420 continue;

421

422 for (const auto *MA : def_chain(MAP.first)) {

423 if (MA == ClobberAt) {

425

426

427

428

429

431 if (!FoundClobber) {

433 FoundClobber = true;

434 }

435 }

436 break;

437 }

438

439

441

443

444 if (MD == Start)

445 continue;

446

448 "Found clobber before reaching ClobberAt!");

449 continue;

450 }

451

453 (void)MU;

454 assert (MU == Start &&

455 "Can only find use in def chain if Start is a use");

456 continue;

457 }

458

460

461

466 ItB != ItE; ++ItB)

469 }

470 }

471

472

473

474

475

476

477 if (AllowImpreciseClobber)

478 return;

479

480

481

483 "ClobberAt never acted as a clobber");

484}

485

486namespace {

487

488

489

490class ClobberWalker {

491

492 using ListIndex = unsigned;

493

494

495

496 struct DefPath {

497 MemoryLocation Loc;

498

499

500 MemoryAccess *First;

501 MemoryAccess *Last;

502 std::optional Previous;

503

504 DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,

505 std::optional Previous)

507

508 DefPath(const MemoryLocation &Loc, MemoryAccess *Init,

509 std::optional Previous)

510 : DefPath(Loc, Init, Init, Previous) {}

511 };

512

514 DominatorTree &DT;

515 BatchAAResults *AA;

516 UpwardsMemoryQuery *Query;

517 unsigned *UpwardWalkLimit;

518

519

520

522

523

524 DenseSet VisitedPhis;

525

526

527 const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {

529

533 while ((Node = Node->getIDom())) {

535 if (Defs)

536 return &*Defs->rbegin();

537 }

539 }

540

541

542 struct UpwardsWalkResult {

543

544

545 MemoryAccess *Result;

546 bool IsKnownClobber;

547 };

548

549

550

551

552

553

554 UpwardsWalkResult

555 walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,

556 const MemoryAccess *SkipStopAt = nullptr) const {

558 assert(UpwardWalkLimit && "Need a valid walk limit");

559 bool LimitAlreadyReached = false;

560

561

562

563

564 if (!*UpwardWalkLimit) {

565 *UpwardWalkLimit = 1;

566 LimitAlreadyReached = true;

567 }

568

569 for (MemoryAccess *Current : def_chain(Desc.Last)) {

570 Desc.Last = Current;

571 if (Current == StopAt || Current == SkipStopAt)

572 return {Current, false};

573

576 return {MD, true};

577

578 if (!--*UpwardWalkLimit)

579 return {Current, true};

580

582 return {MD, true};

583 }

584 }

585

586 if (LimitAlreadyReached)

587 *UpwardWalkLimit = 0;

588

590 "Ended at a non-clobber that's not a phi?");

591 return {Desc.Last, false};

592 }

593

594 void addSearches(MemoryPhi *Phi, SmallVectorImpl &PausedSearches,

595 ListIndex PriorNode) {

601 }

602 }

603

604

605

606

607 struct TerminatedPath {

608 MemoryAccess *Clobber;

609 ListIndex LastNode;

610 };

611

612

613

614

615

616

617

618

619

620

621 std::optional

622 getBlockingAccess(const MemoryAccess *StopWhere,

623 SmallVectorImpl &PausedSearches,

624 SmallVectorImpl &NewPaused,

625 SmallVectorImpl &Terminated) {

626 assert(!PausedSearches.empty() && "No searches to continue?");

627

628

629

630 while (!PausedSearches.empty()) {

631 ListIndex PathIndex = PausedSearches.pop_back_val();

632 DefPath &Node = Paths[PathIndex];

633

634

635

636

637

638

639

640

641

642

643

644

645

646

647

648

649

650

651

652 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)

653 continue;

654

655 const MemoryAccess *SkipStopWhere = nullptr;

656 if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {

658 SkipStopWhere = Query->OriginalAccess;

659 }

660

661 UpwardsWalkResult Res = walkToPhiOrClobber(Node,

662 StopWhere,

663 SkipStopWhere);

664 if (Res.IsKnownClobber) {

665 assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

666

667

668

669 TerminatedPath Term{Res.Result, PathIndex};

670 if (!MSSA.dominates(Res.Result, StopWhere))

672

673

675 continue;

676 }

677

678 if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {

679

680

681

682

683 if (Res.Result != SkipStopWhere)

685 continue;

686 }

687

689 addSearches(cast(Res.Result), PausedSearches, PathIndex);

690 }

691

692 return std::nullopt;

693 }

694

695 template <typename T, typename Walker>

696 struct generic_def_path_iterator

697 : public iterator_facade_base<generic_def_path_iterator<T, Walker>,

698 std::forward_iterator_tag, T *> {

699 generic_def_path_iterator() = default;

700 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

701

702 T &operator*() const { return curNode(); }

703

704 generic_def_path_iterator &operator++() {

705 N = curNode().Previous;

706 return *this;

707 }

708

709 bool operator==(const generic_def_path_iterator &O) const {

710 if (N.has_value() != O.N.has_value())

711 return false;

712 return N || *N == *O.N;

713 }

714

715 private:

716 T &curNode() const { return W->Paths[*N]; }

717

718 Walker *W = nullptr;

719 std::optional N;

720 };

721

722 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;

723 using const_def_path_iterator =

724 generic_def_path_iterator<const DefPath, const ClobberWalker>;

725

727 return make_range(def_path_iterator(this, From), def_path_iterator());

728 }

729

731 return make_range(const_def_path_iterator(this, From),

732 const_def_path_iterator());

733 }

734

735 struct OptznResult {

736

737 TerminatedPath PrimaryClobber;

738

739

741 };

742

743 ListIndex defPathIndex(const DefPath &N) const {

744

745 const DefPath *NP = &N;

747 "Out of bounds DefPath!");

748 return NP - &Paths.front();

749 }

750

751

752

753

754

755

756

757

758

759

760

761

762

763

764 OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,

765 const MemoryLocation &Loc) {

767 "Reset the optimization state.");

768

769 Paths.emplace_back(Loc, Start, Phi, std::nullopt);

770

771

772 auto PriorPathsSize = Paths.size();

773

777

778 addSearches(Phi, PausedSearches, 0);

779

780

781

782 auto MoveDominatedPathToEnd = [&](SmallVectorImpl &Paths) {

783 assert(!Paths.empty() && "Need a path to move");

784 auto Dom = Paths.begin();

785 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)

786 if (!MSSA.dominates(I->Clobber, Dom->Clobber))

787 Dom = I;

788 auto Last = Paths.end() - 1;

789 if (Last != Dom)

790 std::iter_swap(Last, Dom);

791 };

792

793 MemoryPhi *Current = Phi;

794 while (true) {

796 "liveOnEntry wasn't treated as a clobber?");

797

798 const auto *Target = getWalkTarget(Current);

799

800

801 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {

802 return MSSA.dominates(P.Clobber, Target);

803 }));

804

805

806

807

808 if (std::optional Blocker = getBlockingAccess(

809 Target, PausedSearches, NewPaused, TerminatedPaths)) {

810

811

812

813 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {

814 return defPathIndex(N) < PriorPathsSize;

815 });

816 assert(Iter != def_path_iterator());

817

818 DefPath &CurNode = *Iter;

819 assert(CurNode.Last == Current);

820

821

822

823

824

825

826

827

828

829

830

831

832

833

834

835

836

837

838

839

840

841

842

843

844

845

846 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};

848 }

849

850

851

852

853 if (NewPaused.empty()) {

854 MoveDominatedPathToEnd(TerminatedPaths);

856 return {Result, std::move(TerminatedPaths)};

857 }

858

859 MemoryAccess *DefChainEnd = nullptr;

861 for (ListIndex Paused : NewPaused) {

862 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);

863 if (WR.IsKnownClobber)

864 Clobbers.push_back({WR.Result, Paused});

865 else

866

867 DefChainEnd = WR.Result;

868 }

869

870 if (!TerminatedPaths.empty()) {

871

872

873 if (!DefChainEnd)

874 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))

875 DefChainEnd = MA;

876 assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

877

878

879

881 for (const TerminatedPath &TP : TerminatedPaths) {

882

883

884 if (DT.dominates(ChainBB, TP.Clobber->getBlock()))

886 }

887 }

888

889

890

891 if (!Clobbers.empty()) {

892 MoveDominatedPathToEnd(Clobbers);

894 return {Result, std::move(Clobbers)};

895 }

896

898 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

899

900

902

903 PriorPathsSize = Paths.size();

904 PausedSearches.clear();

905 for (ListIndex I : NewPaused)

906 addSearches(DefChainPhi, PausedSearches, I);

907 NewPaused.clear();

908

909 Current = DefChainPhi;

910 }

911 }

912

913 void verifyOptResult(const OptznResult &R) const {

914 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {

915 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);

916 }));

917 }

918

919 void resetPhiOptznState() {

921 VisitedPhis.clear();

922 }

923

924public:

925 ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT)

926 : MSSA(MSSA), DT(DT) {}

927

928

929

930 MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start,

931 UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {

932 AA = &BAA;

933 Query = &Q;

934 UpwardWalkLimit = &UpWalkLimit;

935

936 if (!UpWalkLimit)

937 UpWalkLimit++;

938

939 MemoryAccess *Current = Start;

940

941

943 Current = MU->getDefiningAccess();

944

945 DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);

946

947

948 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);

949 MemoryAccess *Result;

950 if (WalkResult.IsKnownClobber) {

951 Result = WalkResult.Result;

952 } else {

953 OptznResult OptRes = tryOptimizePhi(cast(FirstDesc.Last),

954 Current, Q.StartingLoc);

955 verifyOptResult(OptRes);

956 resetPhiOptznState();

957 Result = OptRes.PrimaryClobber.Clobber;

958 }

959

960#ifdef EXPENSIVE_CHECKS

961 if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)

963#endif

965 }

966};

967

968struct RenamePassData {

971 MemoryAccess *IncomingVal;

972

974 MemoryAccess *M)

975 : DTN(D), ChildIt(It), IncomingVal(M) {}

976

977 void swap(RenamePassData &RHS) {

981 }

982};

983

984}

985

986namespace llvm {

987

989 ClobberWalker Walker;

991

992public:

994

998

999

1000

1001

1002

1003

1005 unsigned &, bool,

1006 bool UseInvariantGroup = true);

1007};

1008

1009

1010

1011

1014

1015public:

1019

1021

1023 unsigned &UWL) {

1024 return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);

1025 }

1029 return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);

1030 }

1031

1034 return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);

1035 }

1036

1048

1051 MUD->resetOptimized();

1052 }

1053};

1054

1057

1058public:

1062

1064

1066 unsigned &UWL) {

1067 return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);

1068 }

1072 return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);

1073 }

1074

1086

1089 MUD->resetOptimized();

1090 }

1091};

1092

1093}

1094

1096 bool RenameAllUses) {

1097

1099 auto It = PerBlockAccesses.find(S);

1100

1101 if (It == PerBlockAccesses.end() || isa<MemoryPhi>(It->second->front()))

1102 continue;

1105 if (RenameAllUses) {

1106 bool ReplacementDone = false;

1107 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)

1108 if (Phi->getIncomingBlock(I) == BB) {

1109 Phi->setIncomingValue(I, IncomingVal);

1110 ReplacementDone = true;

1111 }

1112 (void) ReplacementDone;

1113 assert(ReplacementDone && "Incomplete phi during partial rename");

1114 } else

1115 Phi->addIncoming(IncomingVal, BB);

1116 }

1117}

1118

1119

1120

1121

1123 bool RenameAllUses) {

1124 auto It = PerBlockAccesses.find(BB);

1125

1126 if (It != PerBlockAccesses.end()) {

1128 for (MemoryAccess &L : *Accesses) {

1130 if (MUD->getDefiningAccess() == nullptr || RenameAllUses)

1131 MUD->setDefiningAccess(IncomingVal);

1133 IncomingVal = &L;

1134 } else {

1135 IncomingVal = &L;

1136 }

1137 }

1138 }

1139 return IncomingVal;

1140}

1141

1142

1143

1144

1145

1148 bool SkipVisited, bool RenameAllUses) {

1149 assert(Root && "Trying to rename accesses in an unreachable block");

1150

1152

1153

1154

1155 bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;

1156 if (SkipVisited && AlreadyVisited)

1157 return;

1158

1159 IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);

1160 renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);

1161 WorkStack.push_back({Root, Root->begin(), IncomingVal});

1162

1163 while (!WorkStack.empty()) {

1166 IncomingVal = WorkStack.back().IncomingVal;

1167

1168 if (ChildIt == Node->end()) {

1170 } else {

1172 ++WorkStack.back().ChildIt;

1174

1175

1176 AlreadyVisited = !Visited.insert(BB).second;

1177 if (SkipVisited && AlreadyVisited) {

1178

1179

1180

1181

1182

1184 IncomingVal = &*BlockDefs->rbegin();

1185 } else

1186 IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);

1187 renameSuccessorPhis(BB, IncomingVal, RenameAllUses);

1188 WorkStack.push_back({Child, Child->begin(), IncomingVal});

1189 }

1190 }

1191}

1192

1193

1194

1195

1196void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {

1197 assert(!DT->isReachableFromEntry(BB) &&

1198 "Reachable block found while handling unreachable blocks");

1199

1200

1201

1202

1203

1204 for (const BasicBlock *S : successors(BB)) {

1205 if (!DT->isReachableFromEntry(S))

1206 continue;

1207 auto It = PerBlockAccesses.find(S);

1208

1209 if (It == PerBlockAccesses.end() || isa<MemoryPhi>(It->second->front()))

1210 continue;

1213 Phi->addIncoming(LiveOnEntryDef.get(), BB);

1214 }

1215

1216 auto It = PerBlockAccesses.find(BB);

1217 if (It == PerBlockAccesses.end())

1218 return;

1219

1220 auto &Accesses = It->second;

1221 for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {

1222 auto Next = std::next(AI);

1223

1224

1226 UseOrDef->setDefiningAccess(LiveOnEntryDef.get());

1227 else

1230 }

1231}

1232

1234 : DT(DT), F(&Func), LiveOnEntryDef(nullptr), Walker(nullptr),

1235 SkipWalker(nullptr) {

1236

1237

1238

1239

1240

1241 assert(AA && "No alias analysis?");

1243 buildMemorySSA(BatchAA, iterator_range(F->begin(), F->end()));

1244

1245

1246 this->AA = AA;

1247

1249}

1250

1252 : DT(DT), L(&L), LiveOnEntryDef(nullptr), Walker(nullptr),

1253 SkipWalker(nullptr) {

1254

1255

1256

1257

1258

1259 assert(AA && "No alias analysis?");

1261 buildMemorySSA(

1263 return *const_cast<BasicBlock *>(BB);

1264 }));

1265

1266

1267 this->AA = AA;

1268

1270}

1271

1273

1274 for (const auto &Pair : PerBlockAccesses)

1277}

1278

1280 auto Res = PerBlockAccesses.try_emplace(BB);

1281

1282 if (Res.second)

1283 Res.first->second = std::make_unique();

1284 return Res.first->second.get();

1285}

1286

1288 auto Res = PerBlockDefs.try_emplace(BB);

1289

1290 if (Res.second)

1291 Res.first->second = std::make_unique();

1292 return Res.first->second.get();

1293}

1294

1295namespace llvm {

1296

1297

1298

1299

1300

1301

1302

1303

1305public:

1308 : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

1309

1311

1312private:

1313

1314 struct MemlocStackInfo {

1315

1316

1317 unsigned long StackEpoch;

1318 unsigned long PopEpoch;

1319

1320

1321

1322

1323 unsigned long LowerBound;

1325

1326 unsigned long LastKill;

1327 bool LastKillValid;

1328 };

1329

1330 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,

1333

1335 CachingWalker *Walker;

1338};

1339

1340}

1341

1342

1343

1344

1345

1346

1347

1348

1349

1350

1351

1352

1353

1354void MemorySSA::OptimizeUses::optimizeUsesInBlock(

1355 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,

1358

1359

1362 return;

1363

1364

1365

1366 while (true) {

1368 !VersionStack.empty() &&

1369 "Version stack should have liveOnEntry sentinel dominating everything");

1370 BasicBlock *BackBlock = VersionStack.back()->getBlock();

1371 if (DT->dominates(BackBlock, BB))

1372 break;

1373 while (VersionStack.back()->getBlock() == BackBlock)

1375 ++PopEpoch;

1376 }

1377

1378 for (MemoryAccess &MA : *Accesses) {

1380 if (!MU) {

1382 ++StackEpoch;

1383 continue;

1384 }

1385

1386 if (MU->isOptimized())

1387 continue;

1388

1389 MemoryLocOrCall UseMLOC(MU);

1390 auto &LocInfo = LocStackInfo[UseMLOC];

1391

1392

1393

1394 if (LocInfo.PopEpoch != PopEpoch) {

1395 LocInfo.PopEpoch = PopEpoch;

1396 LocInfo.StackEpoch = StackEpoch;

1397

1398

1399

1400

1401

1402

1403

1404

1405

1406

1407

1408 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&

1409 !DT->dominates(LocInfo.LowerBoundBlock, BB)) {

1410

1411

1412

1413 LocInfo.LowerBound = 0;

1414 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();

1415 LocInfo.LastKillValid = false;

1416 }

1417 } else if (LocInfo.StackEpoch != StackEpoch) {

1418

1419

1420

1421 LocInfo.PopEpoch = PopEpoch;

1422 LocInfo.StackEpoch = StackEpoch;

1423 }

1424 if (!LocInfo.LastKillValid) {

1425 LocInfo.LastKill = VersionStack.size() - 1;

1426 LocInfo.LastKillValid = true;

1427 }

1428

1429

1430

1431 assert(LocInfo.LowerBound < VersionStack.size() &&

1432 "Lower bound out of range");

1433 assert(LocInfo.LastKill < VersionStack.size() &&

1434 "Last kill info out of range");

1435

1436 unsigned long UpperBound = VersionStack.size() - 1;

1437

1438 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {

1439 LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("

1440 << *(MU->getMemoryInst()) << ")"

1441 << " because there are "

1442 << UpperBound - LocInfo.LowerBound

1443 << " stores to disambiguate\n");

1444

1445

1446 LocInfo.LastKillValid = false;

1447 continue;

1448 }

1449 bool FoundClobberResult = false;

1451 while (UpperBound > LocInfo.LowerBound) {

1453

1454

1455

1456 MemoryAccess *Result =

1457 Walker->getClobberingMemoryAccessWithoutInvariantGroup(

1458 MU, *AA, UpwardWalkLimit);

1459

1460 while (VersionStack[UpperBound] != Result) {

1461 assert(UpperBound != 0);

1462 --UpperBound;

1463 }

1464 FoundClobberResult = true;

1465 break;

1466 }

1467

1468 MemoryDef *MD = cast(VersionStack[UpperBound]);

1470 FoundClobberResult = true;

1471 break;

1472 }

1473 --UpperBound;

1474 }

1475

1476

1477

1478 if (FoundClobberResult || UpperBound < LocInfo.LastKill) {

1479 MU->setDefiningAccess(VersionStack[UpperBound], true);

1480 LocInfo.LastKill = UpperBound;

1481 } else {

1482

1483

1484 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);

1485 }

1486 LocInfo.LowerBound = VersionStack.size() - 1;

1487 LocInfo.LowerBoundBlock = BB;

1488 }

1489}

1490

1491

1495 VersionStack.push_back(MSSA->getLiveOnEntryDef());

1496

1497 unsigned long StackEpoch = 1;

1498 unsigned long PopEpoch = 1;

1499

1500 for (const auto *DomNode : depth_first(DT->getRootNode()))

1501 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,

1502 LocStackInfo);

1503}

1504

1505void MemorySSA::placePHINodes(

1507

1509 IDFs.setDefiningBlocks(DefiningBlocks);

1511 IDFs.calculate(IDFBlocks);

1512

1513

1514 for (auto &BB : IDFBlocks)

1515 createMemoryPhi(BB);

1516}

1517

1518template

1519void MemorySSA::buildMemorySSA(BatchAAResults &BAA, IterT Blocks) {

1520

1521

1522

1523

1524

1525

1528 nullptr, &StartingPoint, NextID++));

1529

1530

1531

1532

1534

1535

1537 bool InsertIntoDef = false;

1542 if (!MUD)

1543 continue;

1544

1546 Accesses = getOrCreateAccessList(&B);

1549 InsertIntoDef = true;

1550 if (!Defs)

1551 Defs = getOrCreateDefsList(&B);

1552 Defs->push_back(*MUD);

1553 }

1554 }

1555 if (InsertIntoDef)

1556 DefiningBlocks.insert(&B);

1557 }

1558 placePHINodes(DefiningBlocks);

1559

1560

1561

1562 SmallPtrSet<BasicBlock *, 16> Visited;

1563 if (L) {

1564

1565

1566

1569 U.set(LiveOnEntryDef.get());

1571 }

1572

1573

1575 L->getExitBlocks(ExitBlocks);

1577 renamePass(DT->getNode(L->getLoopPreheader()), LiveOnEntryDef.get(),

1578 Visited);

1579 } else {

1580 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

1581 }

1582

1583

1584

1585 for (auto &BB : Blocks)

1586 if (!Visited.count(&BB))

1587 markUnreachableAsLiveOnEntry(&BB);

1588}

1589

1591

1593 if (Walker)

1594 return Walker.get();

1595

1596 if (!WalkerBase)

1597 WalkerBase = std::make_unique(this, DT);

1598

1599 Walker = std::make_unique(this, WalkerBase.get());

1600 return Walker.get();

1601}

1602

1604 if (SkipWalker)

1605 return SkipWalker.get();

1606

1607 if (!WalkerBase)

1608 WalkerBase = std::make_unique(this, DT);

1609

1610 SkipWalker = std::make_unique(this, WalkerBase.get());

1611 return SkipWalker.get();

1612 }

1613

1614

1615

1616

1617

1621 auto *Accesses = getOrCreateAccessList(BB);

1623

1624

1626 Accesses->push_front(NewAccess);

1627 auto *Defs = getOrCreateDefsList(BB);

1628 Defs->push_front(*NewAccess);

1629 } else {

1632 Accesses->insert(AI, NewAccess);

1634 auto *Defs = getOrCreateDefsList(BB);

1637 Defs->insert(DI, *NewAccess);

1638 }

1639 }

1640 } else {

1641 Accesses->push_back(NewAccess);

1643 auto *Defs = getOrCreateDefsList(BB);

1644 Defs->push_back(*NewAccess);

1645 }

1646 }

1647 BlockNumberingValid.erase(BB);

1648}

1649

1653 bool WasEnd = InsertPt == Accesses->end();

1656 auto *Defs = getOrCreateDefsList(BB);

1657

1658

1659

1660

1661 if (WasEnd) {

1662 Defs->push_back(*What);

1664 Defs->insert(InsertPt->getDefsIterator(), *What);

1665 } else {

1667 ++InsertPt;

1668

1669 if (InsertPt == Accesses->end())

1670 Defs->push_back(*What);

1671 else

1672 Defs->insert(InsertPt->getDefsIterator(), *What);

1673 }

1674 }

1675 BlockNumberingValid.erase(BB);

1676}

1677

1679

1681

1682

1683

1684

1688}

1689

1690

1691

1692

1693

1696 prepareForMoveTo(What, BB);

1698}

1699

1704 "Can only move a Phi at the beginning of the block");

1705

1706 ValueToMemoryAccess.erase(What->getBlock());

1707 bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;

1708 (void)Inserted;

1709 assert(Inserted && "Cannot move a Phi to a block that already has one");

1710 }

1711

1712 prepareForMoveTo(What, BB);

1714}

1715

1719

1721 ValueToMemoryAccess[BB] = Phi;

1722 return Phi;

1723}

1724

1728 bool CreationMustSucceed) {

1731 if (CreationMustSucceed)

1732 assert(NewAccess != nullptr && "Tried to create a memory access for a "

1733 "non-memory touching instruction");

1734 if (NewAccess) {

1736 "A use cannot be a defining access");

1738 }

1739 return NewAccess;

1740}

1741

1742

1743

1744

1747 if (SI->isUnordered())

1748 return true;

1750 if (!LI->isUnordered())

1751 return true;

1752 }

1753 return false;

1754}

1755

1756

1757template

1758MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,

1759 AliasAnalysisType *AAP,

1760 const MemoryUseOrDef *Template) {

1761

1762

1763

1764

1765

1766

1768 switch (II->getIntrinsicID()) {

1769 default:

1770 break;

1771 case Intrinsic::allow_runtime_check:

1772 case Intrinsic::allow_ubsan_check:

1773 case Intrinsic::assume:

1774 case Intrinsic::experimental_noalias_scope_decl:

1775 case Intrinsic::pseudoprobe:

1776 return nullptr;

1777 }

1778 }

1779

1780

1781

1782

1783 if (I->mayReadFromMemory() && I->mayWriteToMemory())

1784 return nullptr;

1785

1790#if !defined(NDEBUG)

1792 bool DefCheck, UseCheck;

1795

1796

1797 assert((Def == DefCheck || !DefCheck) &&

1798 "Memory accesses should only be reduced");

1799 if (!Def && Use != UseCheck) {

1800

1801 assert(!UseCheck && "Invalid template");

1802 }

1803#endif

1804 } else {

1805

1807

1808

1809

1810

1811

1812

1813

1814

1817 }

1818

1819

1820

1821 if (!Def && !Use)

1822 return nullptr;

1823

1824 MemoryUseOrDef *MUD;

1825 if (Def) {

1826 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);

1827 } else {

1828 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());

1832 }

1833 }

1834 ValueToMemoryAccess[I] = MUD;

1835 return MUD;

1836}

1837

1838

1841 "Trying to remove memory access that still has uses");

1842 BlockNumbering.erase(MA);

1845

1848

1849 Value *MemoryInst;

1852 else

1853 MemoryInst = MA->getBlock();

1854

1855 auto VMA = ValueToMemoryAccess.find(MemoryInst);

1856 if (VMA->second == MA)

1857 ValueToMemoryAccess.erase(VMA);

1858}

1859

1860

1861

1862

1863

1864

1865

1868

1869

1871 auto DefsIt = PerBlockDefs.find(BB);

1872 std::unique_ptr &Defs = DefsIt->second;

1873 Defs->remove(*MA);

1874 if (Defs->empty())

1875 PerBlockDefs.erase(DefsIt);

1876 }

1877

1878

1879

1880 auto AccessIt = PerBlockAccesses.find(BB);

1881 std::unique_ptr &Accesses = AccessIt->second;

1882 if (ShouldDelete)

1884 else

1886

1888 PerBlockAccesses.erase(AccessIt);

1889 BlockNumberingValid.erase(BB);

1890 }

1891}

1892

1894 MemorySSAAnnotatedWriter Writer(this);

1896 if (L)

1897 F = L->getHeader()->getParent();

1898 F->print(OS, &Writer);

1899}

1900

1901#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

1903#endif

1904

1906#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)

1908#endif

1909

1910#ifndef NDEBUG

1911 if (F) {

1917 } else {

1918 assert(L && "must either have loop or function");

1919 auto Blocks =

1921 return *const_cast<BasicBlock *>(BB);

1922 });

1927 }

1928#endif

1929

1930

1931

1932

1933

1934

1935

1936

1937

1938

1939}

1940

1941template

1943 for (const BasicBlock &BB : Blocks) {

1945 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {

1946 auto *Pred = Phi->getIncomingBlock(I);

1947 auto *IncAcc = Phi->getIncomingValue(I);

1948

1949

1950

1951 if (auto *DTNode = DT->getNode(Pred)) {

1952 while (DTNode) {

1953 if (auto *DefList = getBlockDefs(DTNode->getBlock())) {

1954 auto *LastAcc = &*(--DefList->end());

1955 assert(LastAcc == IncAcc &&

1956 "Incorrect incoming access into phi.");

1957 (void)IncAcc;

1958 (void)LastAcc;

1959 break;

1960 }

1961 DTNode = DTNode->getIDom();

1962 }

1963 } else {

1964

1965

1966

1967

1968

1969

1970 }

1971 }

1972 }

1973 }

1974}

1975

1976

1977

1978template

1980 if (BlockNumberingValid.empty())

1981 return;

1982

1984 for (const BasicBlock &BB : Blocks) {

1985 if (!ValidBlocks.count(&BB))

1986 continue;

1987

1988 ValidBlocks.erase(&BB);

1989

1991

1993 continue;

1994

1995

1996 unsigned long LastNumber = 0;

1998 auto ThisNumberIter = BlockNumbering.find(&MA);

1999 assert(ThisNumberIter != BlockNumbering.end() &&

2000 "MemoryAccess has no domination number in a valid block!");

2001

2002 unsigned long ThisNumber = ThisNumberIter->second;

2003 assert(ThisNumber > LastNumber &&

2004 "Domination numbers should be strictly increasing!");

2005 (void)LastNumber;

2006 LastNumber = ThisNumber;

2007 }

2008 }

2009

2011 "All valid BasicBlocks should exist in F -- dangling pointers?");

2012}

2013

2014

2015

2016

2017

2018

2019

2020template

2023

2024

2025

2032 if (Phi) {

2033

2036

2037 for (const Use &U : Phi->uses()) {

2038 assert(dominates(Phi, U) && "Memory PHI does not dominate it's uses");

2039 (void)U;

2040 }

2041

2044 "Incomplete MemoryPhi Node");

2045 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {

2046 verifyUseInDefs(Phi->getIncomingValue(I), Phi);

2048 "Incoming phi block not a block predecessor");

2049 }

2050 }

2051 }

2052

2056 "We have memory affecting instructions "

2057 "in this block but they are not in the "

2058 "access list or defs list");

2059 if (MA) {

2060

2063

2065

2066 for (const Use &U : MD->uses()) {

2068 "Memory Def does not dominate it's uses");

2069 (void)U;

2070 }

2071 }

2072

2075 }

2076 }

2077

2078

2079 if (!AL && DL)

2080 continue;

2081

2082 assert(AL->size() == ActualAccesses.size() &&

2083 "We don't have the same number of accesses in the block as on the "

2084 "access list");

2086 "Either we should have a defs list, or we should have no defs");

2088 "We don't have the same number of defs in the block as on the "

2089 "def list");

2090 auto ALI = AL->begin();

2091 auto AAI = ActualAccesses.begin();

2092 while (ALI != AL->end() && AAI != ActualAccesses.end()) {

2093 assert(&*ALI == *AAI && "Not the same accesses in the same order");

2094 ++ALI;

2095 ++AAI;

2096 }

2097 ActualAccesses.clear();

2098 if (DL) {

2099 auto DLI = DL->begin();

2100 auto ADI = ActualDefs.begin();

2101 while (DLI != DL->end() && ADI != ActualDefs.end()) {

2102 assert(&*DLI == *ADI && "Not the same defs in the same order");

2103 ++DLI;

2104 ++ADI;

2105 }

2106 }

2107 ActualDefs.clear();

2108 }

2109}

2110

2111

2112

2114

2115 if (!Def)

2117 "Null def but use not point to live on entry def");

2118 else

2120 "Did not find use in def's use list");

2121}

2122

2123

2124

2125

2126

2127

2128

2129void MemorySSA::renumberBlock(const BasicBlock *B) const {

2130

2131 unsigned long CurrentNumber = 0;

2133 assert(AL != nullptr && "Asking to renumber an empty block");

2134 for (const auto &I : *AL)

2135 BlockNumbering[&I] = ++CurrentNumber;

2136 BlockNumberingValid.insert(B);

2137}

2138

2139

2140

2141

2145

2147 "Asking for local domination when accesses are in different blocks!");

2148

2149 if (Dominatee == Dominator)

2150 return true;

2151

2152

2153

2155 return false;

2156

2157

2158

2160 return true;

2161

2162 if (!BlockNumberingValid.count(DominatorBlock))

2163 renumberBlock(DominatorBlock);

2164

2165 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);

2166

2167 assert(DominatorNum != 0 && "Block was not numbered properly");

2168 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);

2169 assert(DominateeNum != 0 && "Block was not numbered properly");

2170 return DominatorNum < DominateeNum;

2171}

2172

2175 if (Dominator == Dominatee)

2176 return true;

2177

2179 return false;

2180

2182 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());

2184}

2185

2187 const Use &Dominatee) const {

2189 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);

2190

2191 if (UseBB != Dominator->getBlock())

2192 return DT->dominates(Dominator->getBlock(), UseBB);

2193

2195 }

2196

2198}

2199

2201 if (IsOptimized)

2202 return;

2203

2208 IsOptimized = true;

2209}

2210

2213 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);

2214 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);

2215 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);

2216 }

2218}

2219

2222

2224 if (A && A->getID())

2225 OS << A->getID();

2226 else

2228 };

2229

2230 OS << getID() << " = MemoryDef(";

2231 printID(UO);

2232 OS << ")";

2233

2235 OS << "->";

2237 }

2238}

2239

2242 OS << getID() << " = MemoryPhi(";

2246

2247 OS << LS << '{';

2250 else

2252 OS << ',';

2253 if (unsigned ID = MA->getID())

2254 OS << ID;

2255 else

2257 OS << '}';

2258 }

2259 OS << ')';

2260}

2261

2264 OS << "MemoryUse(";

2265 if (UO && UO->getID())

2266 OS << UO->getID();

2267 else

2269 OS << ')';

2270}

2271

2273

2274#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

2276 dbgs() << "\n";

2277#endif

2278}

2279

2281private:

2283 MemorySSAAnnotatedWriter MSSAWriter;

2284

2285public:

2287 : F(F), MSSAWriter(&MSSA) {}

2288

2290 MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }

2291};

2292

2293namespace llvm {

2294

2295template <>

2298 return &(CFGInfo->getFunction()->getEntryBlock());

2299 }

2300

2301

2303

2307

2311

2315};

2316

2317template <>

2319

2321

2324 "' function";

2325 }

2326

2329 Node, nullptr,

2332 },

2333 [](std::string &S, unsigned &I, unsigned Idx) -> void {

2334 std::string Str = S.substr(I, Idx - I);

2336 if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||

2337 SR.count("MemoryUse("))

2338 return;

2340 });

2341 }

2342

2347

2348

2353

2356 return getNodeLabel(Node, CFGInfo).find(';') != std:🧵:npos

2357 ? "style=filled, fillcolor=lightpink"

2358 : "";

2359 }

2360};

2361

2362}

2363

2364AnalysisKey MemorySSAAnalysis::Key;

2365

2372

2375 FunctionAnalysisManager::Invalidator &Inv) {

2380}

2381

2385 if (EnsureOptimizedUses)

2390 } else {

2391 OS << "MemorySSA for function: " << F.getName() << "\n";

2393 }

2394

2396}

2397

2401 OS << "MemorySSA (walker) for function: " << F.getName() << "\n";

2402 MemorySSAWalkerAnnotatedWriter Writer(&MSSA);

2403 F.print(OS, &Writer);

2404

2406}

2407

2414

2416

2418

2420

2426

2433

2436 MSSA->verifyMemorySSA();

2437}

2438

2440 MSSA->print(OS);

2441}

2442

2444

2445

2446

2447

2448

2453

2454

2455 if (Loc.Ptr == nullptr)

2456 return StartingAccess;

2457

2461 return StartingUseOrDef;

2462

2463 I = StartingUseOrDef->getMemoryInst();

2464

2465

2466

2468 return StartingUseOrDef;

2469 }

2470

2471 UpwardsMemoryQuery Q;

2472 Q.OriginalAccess = StartingAccess;

2473 Q.StartingLoc = Loc;

2474 Q.Inst = nullptr;

2475 Q.IsCall = false;

2476

2477

2478

2479

2481 Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);

2483 dbgs() << "Clobber starting at access " << *StartingAccess << "\n";

2484 if (I)

2485 dbgs() << " for instruction " << *I << "\n";

2486 dbgs() << " is " << *Clobber << "\n";

2487 });

2488 return Clobber;

2489}

2490

2493 if (I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())

2494 return nullptr;

2495

2496

2497

2498

2500

2501

2502

2503

2504

2506 return nullptr;

2507

2508 const Instruction *MostDominatingInstruction = &I;

2509

2510 for (const User *Us : PointerOperand->users()) {

2512 if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))

2513 continue;

2514

2515

2516

2517

2518 if (U->hasMetadata(LLVMContext::MD_invariant_group) &&

2520 MostDominatingInstruction = U;

2521 }

2522 }

2523

2524 return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;

2525}

2526

2528 MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit,

2529 bool SkipSelf, bool UseInvariantGroup) {

2531

2532 if (!StartingAccess)

2533 return MA;

2534

2535 if (UseInvariantGroup) {

2537 *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {

2539

2543 return ClobberMA->getDefiningAccess();

2544 return ClobberMA;

2545 }

2546 }

2547

2548 bool IsOptimized = false;

2549

2550

2551

2552

2553 if (StartingAccess->isOptimized()) {

2555 return StartingAccess->getOptimized();

2556 IsOptimized = true;

2557 }

2558

2559 const Instruction *I = StartingAccess->getMemoryInst();

2560

2561

2562

2564 return StartingAccess;

2565

2566 UpwardsMemoryQuery Q(I, StartingAccess);

2567

2570 StartingAccess->setOptimized(LiveOnEntry);

2571 return LiveOnEntry;

2572 }

2573

2574 MemoryAccess *OptimizedAccess;

2575 if (!IsOptimized) {

2576

2577 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

2578

2579

2580

2582 StartingAccess->setOptimized(DefiningAccess);

2583 return DefiningAccess;

2584 }

2585

2586 OptimizedAccess =

2587 Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);

2588 StartingAccess->setOptimized(OptimizedAccess);

2589 } else

2590 OptimizedAccess = StartingAccess->getOptimized();

2591

2592 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");

2594 LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");

2596

2597 MemoryAccess *Result;

2599 isa(StartingAccess) && UpwardWalkLimit) {

2601 Q.SkipSelfAccess = true;

2602 Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);

2603 } else

2604 Result = OptimizedAccess;

2605

2606 LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);

2607 LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

2608

2610}

2611

2612MemoryAccess *

2616 return Use->getDefiningAccess();

2617 return MA;

2618}

2619

2623 return Use->getDefiningAccess();

2624 return StartingAccess;

2625}

2626

2627void MemoryPhi::deleteMe(DerivedUser *Self) {

2628 delete static_cast<MemoryPhi *>(Self);

2629}

2630

2631void MemoryDef::deleteMe(DerivedUser *Self) {

2632 delete static_cast<MemoryDef *>(Self);

2633}

2634

2635void MemoryUse::deleteMe(DerivedUser *Self) {

2636 delete static_cast<MemoryUse *>(Self);

2637}

2638

2639bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {

2640 auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {

2643 return true;

2645 };

2646

2649 if (I->getParent()->isEntryBlock())

2650 return true;

2651 }

2653 return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&

2654 GEP->hasAllConstantIndices();

2655 }

2656 return IsGuaranteedLoopInvariantBase(Ptr);

2657}

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

Atomic ordering constants.

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

#define LLVM_DUMP_METHOD

Mark debug helper function definitions like dump() that should not be stripped from debug builds.

DXIL Forward Handle Accesses

This file defines DenseMapInfo traits for DenseMap.

This file defines the DenseMap class.

This file defines the DenseSet and SmallDenseSet classes.

This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.

early cse Early CSE w MemorySSA

This header defines various interfaces for pass management in LLVM.

This defines the Use class.

This file provides utility analysis objects describing memory locations.

static bool instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc, const Instruction *UseInst, AliasAnalysisType &AA)

Definition MemorySSA.cpp:282

Memory static true cl::opt< unsigned > MaxCheckLimit("memssa-check-limit", cl::Hidden, cl::init(100), cl::desc("The maximum number of stores/phis MemorySSA" "will consider trying to walk past (default = 100)"))

static cl::opt< bool, true > VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA), cl::Hidden, cl::desc("Enable verification of MemorySSA."))

static const char LiveOnEntryStr[]

Definition MemorySSA.cpp:91

static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA, const Instruction *I)

Definition MemorySSA.cpp:371

static bool areLoadsReorderable(const LoadInst *Use, const LoadInst *MayClobber)

This does one-way checks to see if Use could theoretically be hoisted above MayClobber.

Definition MemorySSA.cpp:256

static const Instruction * getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT)

Definition MemorySSA.cpp:2492

static void checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, const MemoryLocation &StartLoc, const MemorySSA &MSSA, const UpwardsMemoryQuery &Query, BatchAAResults &AA, bool AllowImpreciseClobber=false)

Verifies that Start is clobbered by ClobberAt, and that nothing inbetween Start and ClobberAt can clo...

Definition MemorySSA.cpp:397

static cl::opt< std::string > DotCFGMSSA("dot-cfg-mssa", cl::value_desc("file name for generated dot file"), cl::desc("file name for generated dot file"), cl::init(""))

static bool isOrdered(const Instruction *I)

Definition MemorySSA.cpp:1745

This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...

static std::string getNodeLabel(const ValueInfo &VI, GlobalValueSummary *GVS)

uint64_t IntrinsicInst * II

#define INITIALIZE_PASS_DEPENDENCY(depName)

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)

This file defines the SmallPtrSet class.

This file defines the SmallVector class.

Definition MemorySSA.cpp:2280

DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)

Definition MemorySSA.cpp:2286

const Function * getFunction()

Definition MemorySSA.cpp:2289

MemorySSAAnnotatedWriter & getWriter()

Definition MemorySSA.cpp:2290

A manager for alias analyses.

A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.

This templated class represents "all analyses that operate over " (e....

PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)

Get the result of an analysis pass for a given IR unit.

Represent the analysis usage information of a pass.

void setPreservesAll()

Set by analyses that do not transform their input at all.

AnalysisUsage & addRequiredTransitive()

LLVM Basic Block Representation.

LLVM_ABI BasicBlock::iterator erase(BasicBlock::iterator FromIt, BasicBlock::iterator ToIt)

Erases a range of instructions from FromIt to (not including) ToIt.

iterator begin()

Instruction iterator methods.

LLVM_ABI void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW=nullptr, bool ShouldPreserveUseListOrder=false, bool IsForDebug=false) const

Print the basic block to an output stream with an optional AssemblyAnnotationWriter.

LLVM_ABI LLVMContext & getContext() const

Get the context in which this basic block lives.

This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

Value * getCalledOperand() const

User::op_iterator arg_end()

Return the iterator pointing to the end of the argument list.

unsigned arg_size() const

Implements a dense probed hash-table based set.

Extension point for the Value hierarchy.

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *, BatchAAResults &) override

Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...

Definition MemorySSA.cpp:2613

typename SmallVector< DomTreeNodeBase *, 4 >::const_iterator const_iterator

Analysis pass which computes a DominatorTree.

DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const

getNode - return the (Post)DominatorTree node for the specified basic block.

Legacy analysis pass which computes a DominatorTree.

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

LLVM_ABI bool isReachableFromEntry(const Use &U) const

Provide an overload for a Use.

LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const

Return true if the (end of the) basic block BB dominates the use U.

Module * getParent()

Get the module that this global value is contained inside of...

A wrapper class for inspecting calls to intrinsic functions.

A helper class to return the specified delimiter string after the first invocation of operator String...

An instruction for reading from memory.

bool isVolatile() const

Return true if this is a load from a volatile memory location.

AtomicOrdering getOrdering() const

Returns the ordering constraint of this load instruction.

Represents a single loop in the control flow graph.

LLVM_ABI void dump() const

Definition MemorySSA.cpp:2272

MemoryAccess(const MemoryAccess &)=delete

BasicBlock * getBlock() const

LLVM_ABI void print(raw_ostream &OS) const

Definition MemorySSA.cpp:2211

unsigned getID() const

Used for debugging and tracking things about MemoryAccesses.

void setBlock(BasicBlock *BB)

Used by MemorySSA to change the block of a MemoryAccess when it is moved.

Represents a read-write access to memory, whether it is a must-alias, or a may-alias.

LLVM_ABI void print(raw_ostream &OS) const

Definition MemorySSA.cpp:2220

MemoryAccess * getOptimized() const

Representation for a specific memory location.

static LLVM_ABI MemoryLocation get(const LoadInst *LI)

Return a location with information about the memory reference by the given instruction.

Represents phi nodes for memory accesses.

BasicBlock * getIncomingBlock(unsigned I) const

Return incoming basic block number i.

LLVM_ABI void print(raw_ostream &OS) const

Definition MemorySSA.cpp:2240

An analysis that produces MemorySSA for a function.

LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM)

Definition MemorySSA.cpp:2366

LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)

Definition MemorySSA.cpp:2382

static LLVM_ABI bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, AliasAnalysis &AA)

Definition MemorySSA.cpp:340

LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)

Definition MemorySSA.cpp:2398

This is the generic walker interface for walkers of MemorySSA.

MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)

Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...

LLVM_ABI MemorySSAWalker(MemorySSA *)

Definition MemorySSA.cpp:2443

virtual void invalidateInfo(MemoryAccess *)

Given a memory access, invalidate anything this walker knows about that access.

Legacy analysis pass which computes MemorySSA.

void verifyAnalysis() const override

verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...

Definition MemorySSA.cpp:2434

void releaseMemory() override

releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...

Definition MemorySSA.cpp:2419

bool runOnFunction(Function &) override

runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.

Definition MemorySSA.cpp:2427

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...

Definition MemorySSA.cpp:2421

void print(raw_ostream &OS, const Module *M=nullptr) const override

print - Print out the internal state of the pass.

Definition MemorySSA.cpp:2439

MemorySSAWrapperPass()

Definition MemorySSA.cpp:2417

A MemorySSAWalker that does AA walks to disambiguate accesses.

Definition MemorySSA.cpp:1012

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA) override

Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...

Definition MemorySSA.cpp:1037

MemoryAccess * getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL)

Definition MemorySSA.cpp:1032

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA, unsigned &UWL)

Definition MemorySSA.cpp:1026

void invalidateInfo(MemoryAccess *MA) override

Given a memory access, invalidate anything this walker knows about that access.

Definition MemorySSA.cpp:1049

~CachingWalker() override=default

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL)

Definition MemorySSA.cpp:1022

CachingWalker(MemorySSA *M, ClobberWalkerBase *W)

Definition MemorySSA.cpp:1016

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA) override

Given a potentially clobbering memory access and a new location, calling this will give you the neare...

Definition MemorySSA.cpp:1042

Definition MemorySSA.cpp:988

ClobberWalkerBase(MemorySSA *M, DominatorTree *D)

Definition MemorySSA.cpp:993

MemoryAccess * getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &, unsigned &, bool, bool UseInvariantGroup=true)

MemoryAccess * getClobberingMemoryAccessBase(MemoryAccess *, const MemoryLocation &, BatchAAResults &, unsigned &)

This class is a batch walker of all MemoryUse's in the program, and points their defining access at t...

Definition MemorySSA.cpp:1304

void optimizeUses()

Optimize uses to point to their actual clobbering definitions.

Definition MemorySSA.cpp:1492

OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA, DominatorTree *DT)

Definition MemorySSA.cpp:1306

Definition MemorySSA.cpp:1055

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA) override

Given a potentially clobbering memory access and a new location, calling this will give you the neare...

Definition MemorySSA.cpp:1080

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA, unsigned &UWL)

Definition MemorySSA.cpp:1069

~SkipSelfWalker() override=default

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL)

Definition MemorySSA.cpp:1065

SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)

Definition MemorySSA.cpp:1059

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA) override

Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...

Definition MemorySSA.cpp:1075

void invalidateInfo(MemoryAccess *MA) override

Given a memory access, invalidate anything this walker knows about that access.

Definition MemorySSA.cpp:1087

Encapsulates MemorySSA, including all data associated with memory accesses.

LLVM_ABI void dump() const

Definition MemorySSA.cpp:1902

simple_ilist< MemoryAccess, ilist_tag< MSSAHelpers::DefsOnlyTag > > DefsList

LLVM_ABI void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where)

Definition MemorySSA.cpp:1694

const AccessList * getBlockAccesses(const BasicBlock *BB) const

Return the list of MemoryAccess's for a given basic block.

void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock * > &Visited)

LLVM_ABI MemorySSAWalker * getSkipSelfWalker()

Definition MemorySSA.cpp:1603

LLVM_ABI MemorySSA(Function &, AliasAnalysis *, DominatorTree *)

Definition MemorySSA.cpp:1233

iplist< MemoryAccess, ilist_tag< MSSAHelpers::AllAccessTag > > AccessList

LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const

Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...

Definition MemorySSA.cpp:2173

void verifyOrderingDominationAndDefUses(IterT Blocks, VerificationLevel=VerificationLevel::Fast) const

Verify ordering: the order and existence of MemoryAccesses matches the order and existence of memory ...

Definition MemorySSA.cpp:2021

AccessList * getWritableBlockAccesses(const BasicBlock *BB) const

LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const

Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...

Definition MemorySSA.cpp:1905

LLVM_ABI void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, InsertionPlace)

Definition MemorySSA.cpp:1618

LLVM_ABI MemorySSAWalker * getWalker()

Definition MemorySSA.cpp:1590

InsertionPlace

Used in various insertion functions to specify whether we are talking about the beginning or end of a...

LLVM_ABI void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, AccessList::iterator)

Definition MemorySSA.cpp:1650

LLVM_ABI MemoryUseOrDef * createDefinedAccess(Instruction *, MemoryAccess *, const MemoryUseOrDef *Template=nullptr, bool CreationMustSucceed=true)

Definition MemorySSA.cpp:1725

DefsList * getWritableBlockDefs(const BasicBlock *BB) const

DominatorTree & getDomTree() const

MemoryUseOrDef * getMemoryAccess(const Instruction *I) const

Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.

MemoryAccess * getLiveOnEntryDef() const

LLVM_ABI void removeFromLookups(MemoryAccess *)

Properly remove MA from all of MemorySSA's lookup tables.

Definition MemorySSA.cpp:1839

LLVM_ABI void ensureOptimizedUses()

By default, uses are not optimized during MemorySSA construction.

Definition MemorySSA.cpp:2200

void verifyDominationNumbers(IterT Blocks) const

Verify that all of the blocks we believe to have valid domination numbers actually have valid dominat...

Definition MemorySSA.cpp:1979

void verifyPrevDefInPhis(IterT Blocks) const

Definition MemorySSA.cpp:1942

const DefsList * getBlockDefs(const BasicBlock *BB) const

Return the list of MemoryDef's and MemoryPhi's for a given basic block.

LLVM_ABI bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const

Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...

Definition MemorySSA.cpp:2142

LLVM_ABI void removeFromLists(MemoryAccess *, bool ShouldDelete=true)

Properly remove MA from all of MemorySSA's lists.

Definition MemorySSA.cpp:1866

bool isLiveOnEntryDef(const MemoryAccess *MA) const

Return true if MA represents the live on entry value.

LLVM_ABI ~MemorySSA()

Definition MemorySSA.cpp:1272

LLVM_ABI void print(raw_ostream &) const

Definition MemorySSA.cpp:1893

Class that has the common methods + fields of memory uses/defs.

MemoryAccess * getDefiningAccess() const

Get the access that produces the memory state used by this Use.

void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false)

void setOptimized(MemoryAccess *)

Sets the optimized use for a MemoryDef.

Instruction * getMemoryInst() const

Get the instruction that this MemoryUse represents.

LLVM_ABI void print(raw_ostream &OS) const

Definition MemorySSA.cpp:2262

A Module instance is used to store all the information related to an LLVM module.

void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW, bool ShouldPreserveUseListOrder=false, bool IsForDebug=false) const

Print the module to an output stream with an optional AssemblyAnnotationWriter.

AnalysisType & getAnalysis() const

getAnalysis() - This function is used by subclasses to get to the analysis information ...

A set of analyses that are preserved following a run of a transformation pass.

static PreservedAnalyses all()

Construct a special preserved set that preserves all passes.

PreservedAnalysisChecker getChecker() const

Build a checker for this PreservedAnalyses and the specified analysis type.

A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...

bool erase(PtrType Ptr)

Remove pointer from the set.

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

void insert_range(Range &&R)

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

reference emplace_back(ArgTypes &&... Args)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

StringRef - Represent a constant reference to a string, i.e.

std::string str() const

str - Get the contents as an std::string.

size_t count(char C) const

Return the number of occurrences of C in the string.

A Use represents the edge between a Value definition and its users.

User * getUser() const

Returns the User that contains this Use.

void dropAllReferences()

Drop all references to operands.

unsigned getNumOperands() const

LLVM Value Representation.

iterator_range< user_iterator > users()

unsigned getValueID() const

Return an ID for the concrete type of this object.

LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const

Print the name of this Value out to the specified raw_ostream.

LLVM_ABI const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

iterator_range< use_iterator > uses()

LLVM_ABI StringRef getName() const

Return a constant reference to the value's name.

std::pair< iterator, bool > insert(const ValueT &V)

formatted_raw_ostream - A raw_ostream that wraps another one and keeps track of line and column posit...

An opaque object representing a hash code.

typename base_list_type::iterator iterator

This class implements an extremely fast bulk output stream that can only output to a stream.

A raw_ostream that writes to an std::string.

reverse_iterator rbegin()

This provides a very simple, boring adaptor for a begin and end iterator into a range type.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

Abstract Attribute helper functions.

@ C

The default llvm calling convention, compatible with C.

@ BasicBlock

Various leaf nodes.

This namespace contains all of the command line option processing machinery.

initializer< Ty > init(const Ty &Val)

LocationClass< Ty > location(Ty &L)

NodeAddr< DefNode * > Def

NodeAddr< PhiNode * > Phi

NodeAddr< UseNode * > Use

NodeAddr< NodeBase * > Node

friend class Instruction

Iterator for Instructions in a `BasicBlock.

This is an optimization pass for GlobalISel generic memory operations.

FunctionAddr VTableAddr Value

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

static cl::opt< unsigned long > StopAt("sbvec-stop-at", cl::init(StopAtDisabled), cl::Hidden, cl::desc("Vectorize if the invocation count is < than this. 0 " "disables vectorization."))

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

APInt operator*(APInt a, uint64_t RHS)

auto successors(const MachineBasicBlock *BB)

const Value * getLoadStorePointerOperand(const Value *V)

A helper function that returns the pointer operand of a load or store instruction.

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)

Make a range that does early increment to allow mutation of the underlying range without disrupting i...

auto pred_size(const MachineBasicBlock *BB)

raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")

upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT)

bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)

auto map_range(ContainerTy &&C, FuncTy F)

DomTreeNodeBase< BasicBlock > DomTreeNode

auto dyn_cast_or_null(const Y &Val)

std::pair< MemoryAccess *, MemoryLocation > MemoryAccessPair

iterator_range< def_chain_iterator< T > > def_chain(T MA, MemoryAccess *UpTo=nullptr)

bool isModSet(const ModRefInfo MRI)

auto find_if_not(R &&Range, UnaryPredicate P)

LLVM_ABI raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)

bool isModOrRefSet(const ModRefInfo MRI)

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

IDFCalculator< false > ForwardIDFCalculator

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

iterator_range(Container &&) -> iterator_range< llvm::detail::IterOfRange< Container > >

ModRefInfo

Flags indicating whether a memory access modifies or references memory.

@ ModRef

The access may reference and may modify the value stored in memory.

@ First

Helpers to iterate all locations in the MemoryEffectsBase class.

LLVM_ABI bool VerifyMemorySSA

Enables verification of MemorySSA.

Definition MemorySSA.cpp:84

upward_defs_iterator upward_defs_end()

FunctionAddr VTableAddr Next

DWARFExpression::Operation Op

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

auto find_if(R &&Range, UnaryPredicate P)

Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.

auto predecessors(const MachineBasicBlock *BB)

bool is_contained(R &&Range, const E &Element)

Returns true if Element is found in Range.

iterator_range< df_iterator< T > > depth_first(const T &G)

AnalysisManager< Function > FunctionAnalysisManager

Convenience typedef for the Function analysis manager.

hash_code hash_combine(const Ts &...args)

Combine values into a single hash_code.

SuccIterator< const Instruction, const BasicBlock > const_succ_iterator

AAResults AliasAnalysis

Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.

bool isRefSet(const ModRefInfo MRI)

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I, DOTFuncMSSAInfo *CFGInfo)

Display the raw branch weights from PGO.

Definition MemorySSA.cpp:2349

std::string getNodeAttributes(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo)

Definition MemorySSA.cpp:2354

static std::string getEdgeSourceLabel(const BasicBlock *Node, const_succ_iterator I)

Definition MemorySSA.cpp:2343

std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo)

Definition MemorySSA.cpp:2327

static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo)

Definition MemorySSA.cpp:2322

DOTGraphTraits(bool IsSimple=false)

Definition MemorySSA.cpp:2320

DefaultDOTGraphTraits(bool simple=false)

static std::string getEdgeSourceLabel(const void *, EdgeIter)

getEdgeSourceLabel - If you want to label the edge source itself, implement this method.

static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS)

Definition MemorySSA.cpp:243

static unsigned getHashValue(const MemoryLocOrCall &MLOC)

Definition MemorySSA.cpp:228

static MemoryLocOrCall getEmptyKey()

Definition MemorySSA.cpp:220

static MemoryLocOrCall getTombstoneKey()

Definition MemorySSA.cpp:224

An information struct used to provide DenseMap with the various necessary components for a given value type T.

pointer_iterator< Function::const_iterator > nodes_iterator

Definition MemorySSA.cpp:2302

static size_t size(DOTFuncMSSAInfo *CFGInfo)

Definition MemorySSA.cpp:2312

static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo)

Definition MemorySSA.cpp:2297

static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo)

Definition MemorySSA.cpp:2304

static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo)

Definition MemorySSA.cpp:2308

typename DOTFuncMSSAInfo *::UnknownGraphTypeError NodeRef

LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)

LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)

Definition MemorySSA.cpp:2408