LLVM: lib/Analysis/MemorySSA.cpp Source File


#include "llvm/Config/llvm-config.h"

#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa", cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

const static char LiveOnEntryStr[] = "liveOnEntry";

namespace {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

/// An assembly annotator class to print Memory SSA information in
/// comments, together with the clobbering access computed by the walker.
class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  BatchAAResults BAA;

public:
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA);
      OS << "; " << *MA;
      if (Clobber) {
        OS << " - clobbered by ";
        if (MSSA->isLiveOnEntryDef(Clobber))
          OS << LiveOnEntryStr;
        else
          OS << *Clobber;
      }
      OS << "\n";
    }
  }
};

} // end anonymous namespace

151

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" as a single object.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memory location for a fence inst, and it
      // is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

216

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
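// Illustrative sketch, not part of the LLVM source: the DenseMapInfo
// specialization above is what lets MemoryLocOrCall key a DenseMap directly
// (the OptimizeUses walker below relies on this). The function name is
// hypothetical.
#if 0
static void countQuery(const MemoryUseOrDef *MUD,
                       DenseMap<MemoryLocOrCall, unsigned> &Counts) {
  // Calls hash by callee and argument list; all other accesses hash by
  // their MemoryLocation (see getHashValue above).
  ++Counts[MemoryLocOrCall(MUD)];
}
#endif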

249

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic
  // (or weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
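// Worked examples for areLoadsReorderable (illustration, not LLVM source):
//   Use = unordered load, MayClobber = monotonic load -> true  (freely
//     reorderable; monotonic is weaker than acquire)
//   Use = seq_cst load,   MayClobber = unordered load -> false (seq_cst uses
//     may not be hoisted)
//   Use = unordered load, MayClobber = acquire load   -> false (nothing moves
//     above an acquire)
//   Use = volatile load,  MayClobber = volatile load  -> false (volatile pair)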

279

template <typename AliasAnalysisType>
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    return isModOrRefSet(I);
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  return isModSet(I);
}

325

template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {
  // If it's a call, get the memory location for the call, otherwise use the
  // start location.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
}
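// Illustration (not LLVM source) of what these queries decide. Given:
//
//   store i32 1, ptr %p        ; 1 = MemoryDef(liveOnEntry)
//   %v = load i32, ptr %p      ; MemoryUse(1)
//
// instructionClobbersQuery on the store's MemoryDef and the load's location
// returns true: AA reports that the store Mods the loaded location, so the
// def clobbers the use.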

344

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

369

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           !isModSet(AA.getModRefInfoMask(MemoryLocation::get(LI)));
  }
  return false;
}
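// Illustration (not LLVM source): a load tagged !invariant.load is trivially
// optimizable to liveOnEntry -- no def inside the function can change the
// loaded memory, so no walking is needed:
//
//   %v = load i32, ptr %p, !invariant.load !0   ; MemoryUse(liveOnEntry)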

381

382

383

384

385

386

387

388

389

390

391

392

393

394

395

400 bool AllowImpreciseClobber = false) {

401 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

402

405 "liveOnEntry must clobber itself");

406 return;

407 }


  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't properly short-circuit.
          if (!FoundClobber) {
            if (instructionClobbersQuery(MD, MAP.second, Query.Inst, AA))
              FoundClobber = true;
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert (MU == Start &&
                "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));

      // Add reachable phi predecessors
      for (auto ItB = upward_defs_begin(
                    {const_cast<MemoryAccess *>(MA), MAP.second},
                    MSSA.getDomTree()),
                ItE = upward_defs_end();
           ItB != ItE; ++ItB)
        if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
          Worklist.emplace_back(*ItB);
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some
  // point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

485

486namespace {

487

488

489

490class ClobberWalker {

491

493

494

495

496 struct DefPath {

498

499

502 std::optional Previous;

503

505 std::optional Previous)

507

509 std::optional Previous)

510 : DefPath(Loc, Init, Init, Previous) {}

511 };

512

516 UpwardsMemoryQuery *Query;

517 unsigned *UpwardWalkLimit;

518

519

520

522

523

525

526

528 assert(From->getNumOperands() && "Phi with no operands?");

529

533 while ((Node = Node->getIDom())) {

535 if (Defs)

536 return &*Defs->rbegin();

537 }

539 }

540

541

542 struct UpwardsWalkResult {

543

544

546 bool IsKnownClobber;

547 };

548

549

550

551

552

553

554 UpwardsWalkResult

555 walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,

556 const MemoryAccess *SkipStopAt = nullptr) const {

    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");

558 assert(UpwardWalkLimit && "Need a valid walk limit");

559 bool LimitAlreadyReached = false;

560

561

562

563

564 if (!*UpwardWalkLimit) {

565 *UpwardWalkLimit = 1;

566 LimitAlreadyReached = true;

567 }

568

570 Desc.Last = Current;

571 if (Current == StopAt || Current == SkipStopAt)

572 return {Current, false};

573

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {

576 return {MD, true};

577

578 if (!--*UpwardWalkLimit)

579 return {Current, true};

580

582 return {MD, true};

583 }

584 }

585

586 if (LimitAlreadyReached)

587 *UpwardWalkLimit = 0;

588

590 "Ended at a non-clobber that's not a phi?");

591 return {Desc.Last, false};

592 }

593

595 ListIndex PriorNode) {

601 }

602 }

603

604

605

606

607 struct TerminatedPath {

609 ListIndex LastNode;

610 };

611

612

613

614

615

616

617

618

619

620

621 std::optional

622 getBlockingAccess(const MemoryAccess *StopWhere,

626 assert(!PausedSearches.empty() && "No searches to continue?");

627

628

629

630 while (!PausedSearches.empty()) {

631 ListIndex PathIndex = PausedSearches.pop_back_val();

632 DefPath &Node = Paths[PathIndex];

633

634

635

636

637

638

639

640

641

642

643

644

645

646

647

648

649

650

651

652 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)

653 continue;

654

656 if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {

        assert(isa<MemoryDef>(Query->OriginalAccess));

658 SkipStopWhere = Query->OriginalAccess;

659 }

660

661 UpwardsWalkResult Res = walkToPhiOrClobber(Node,

662 StopWhere,

663 SkipStopWhere);

664 if (Res.IsKnownClobber) {

665 assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

666

667

668

669 TerminatedPath Term{Res.Result, PathIndex};

670 if (!MSSA.dominates(Res.Result, StopWhere))

672

673

675 continue;

676 }

677

678 if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {

679

680

681

682

683 if (Res.Result != SkipStopWhere)

685 continue;

686 }

687

      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);

690 }

691

692 return std::nullopt;

693 }

694

695 template <typename T, typename Walker>

696 struct generic_def_path_iterator

698 std::forward_iterator_tag, T *> {

699 generic_def_path_iterator() = default;

700 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

701

702 T &operator*() const { return curNode(); }

703

704 generic_def_path_iterator &operator++() {

705 N = curNode().Previous;

706 return *this;

707 }

708

709 bool operator==(const generic_def_path_iterator &O) const {

710 if (N.has_value() != O.N.has_value())

711 return false;

712 return N || *N == *O.N;

713 }

714

715 private:

716 T &curNode() const { return W->Paths[*N]; }

717

718 Walker *W = nullptr;

719 std::optional N;

720 };

721

722 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;

723 using const_def_path_iterator =

724 generic_def_path_iterator<const DefPath, const ClobberWalker>;

725

727 return make_range(def_path_iterator(this, From), def_path_iterator());

728 }

729

731 return make_range(const_def_path_iterator(this, From),

732 const_def_path_iterator());

733 }

734

735 struct OptznResult {

736

737 TerminatedPath PrimaryClobber;

738

739

741 };

742

743 ListIndex defPathIndex(const DefPath &N) const {

744

745 const DefPath *NP = &N;

747 "Out of bounds DefPath!");

748 return NP - &Paths.front();

749 }

750

751

752

753

754

755

756

757

758

759

760

761

762

763

767 "Reset the optimization state.");

768

769 Paths.emplace_back(Loc, Start, Phi, std::nullopt);

770

771

772 auto PriorPathsSize = Paths.size();

773

777

778 addSearches(Phi, PausedSearches, 0);

779

780

781

783 assert(!Paths.empty() && "Need a path to move");

784 auto Dom = Paths.begin();

785 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)

786 if (!MSSA.dominates(I->Clobber, Dom->Clobber))

787 Dom = I;

788 auto Last = Paths.end() - 1;

789 if (Last != Dom)

790 std::iter_swap(Last, Dom);

791 };

792

794 while (true) {

796 "liveOnEntry wasn't treated as a clobber?");

797

798 const auto *Target = getWalkTarget(Current);

799

800

801 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {

803 }));

804

805

806

807

808 if (std::optional Blocker = getBlockingAccess(

809 Target, PausedSearches, NewPaused, TerminatedPaths)) {

810

811

812

813 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {

814 return defPathIndex(N) < PriorPathsSize;

815 });

816 assert(Iter != def_path_iterator());

817

818 DefPath &CurNode = *Iter;

819 assert(CurNode.Last == Current);

820

821

822

823

824

825

826

827

828

829

830

831

832

833

834

835

836

837

838

839

840

841

842

843

844

845

846 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};

848 }

849

850

851

852

853 if (NewPaused.empty()) {

854 MoveDominatedPathToEnd(TerminatedPaths);

856 return {Result, std::move(TerminatedPaths)};

857 }

858

861 for (ListIndex Paused : NewPaused) {

862 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);

863 if (WR.IsKnownClobber)

864 Clobbers.push_back({WR.Result, Paused});

865 else

866

867 DefChainEnd = WR.Result;

868 }

869

870 if (!TerminatedPaths.empty()) {

871

872

873 if (!DefChainEnd)

875 DefChainEnd = MA;

876 assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

877

878

879

881 for (const TerminatedPath &TP : TerminatedPaths) {

882

883

884 if (DT.dominates(ChainBB, TP.Clobber->getBlock()))

886 }

887 }

888

889

890

891 if (!Clobbers.empty()) {

892 MoveDominatedPathToEnd(Clobbers);

894 return {Result, std::move(Clobbers)};

895 }

896

898 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

899

900

      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

902

903 PriorPathsSize = Paths.size();

904 PausedSearches.clear();

905 for (ListIndex I : NewPaused)

906 addSearches(DefChainPhi, PausedSearches, I);

907 NewPaused.clear();

908

909 Current = DefChainPhi;

910 }

911 }

912

913 void verifyOptResult(const OptznResult &R) const {

914 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {

915 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);

916 }));

917 }

918

919 void resetPhiOptznState() {

921 VisitedPhis.clear();

922 }

923

924public:

926 : MSSA(MSSA), DT(DT) {}

927

928

929

931 UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {

932 AA = &BAA;

933 Query = &Q;

934 UpwardWalkLimit = &UpWalkLimit;

935

936 if (!UpWalkLimit)

937 UpWalkLimit++;

938

940

941

    if (auto *MU = dyn_cast<MemoryUse>(Start))

943 Current = MU->getDefiningAccess();

944

945 DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);

946

947

948 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);

950 if (WalkResult.IsKnownClobber) {

951 Result = WalkResult.Result;

952 } else {

      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);

955 verifyOptResult(OptRes);

956 resetPhiOptznState();

957 Result = OptRes.PrimaryClobber.Clobber;

958 }

959

960#ifdef EXPENSIVE_CHECKS

961 if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)

963#endif

965 }

966};

967

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

983

984}

985

986namespace llvm {

987

989 ClobberWalker Walker;

991

992public:

994

998

999

1000

1001

1002

1003

1005 unsigned &, bool,

1006 bool UseInvariantGroup = true);

1007};

1008

1009

1010

1011

1014

1015public:

1019

1021

1023 unsigned &UWL) {

1024 return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);

1025 }

1029 return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);

1030 }

1031

1034 return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);

1035 }

1036

1041 }

1047 }

1048

    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))

1051 MUD->resetOptimized();

1052 }

1053};

1054

1057

1058public:

1062

1064

1066 unsigned &UWL) {

1067 return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);

1068 }

1072 return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);

1073 }

1074

1079 }

1085 }

1086

    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))

1089 MUD->resetOptimized();

1090 }

1091};

1092

1093}

1094

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void) ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

1118

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

1141

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;

  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
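// Illustration (not LLVM source): the effect of renamePass on a diamond CFG,
// with the annotations MemorySSA would print shown as comments:
//
//   entry:  store i32 0, ptr %p    ; 1 = MemoryDef(liveOnEntry)
//   left:   store i32 1, ptr %p    ; 2 = MemoryDef(1)
//   right:  (no memory accesses -- the incoming value stays 1)
//   merge:                         ; 3 = MemoryPhi({left,2},{right,1})
//           %v = load i32, ptr %p  ; MemoryUse(3)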

1192

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

1232

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), F(&Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {

1236

1237

1238

1239

1240

1241 assert(AA && "No alias analysis?");

1244

1245

1246 this->AA = AA;

1247

1249}

1250

MemorySSA::MemorySSA(Loop &L, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), L(&L), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {

1254

1255

1256

1257

1258

1259 assert(AA && "No alias analysis?");

  buildMemorySSA(
      BatchAA, map_range(L.blocks(), [](const BasicBlock *BB) -> BasicBlock & {
        return *const_cast<BasicBlock *>(BB);
      }));

1265

1266

1267 this->AA = AA;

1268

1270}

1271

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

1278

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

1286

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}

1294

1295namespace llvm {

1296

1297

1298

1299

1300

1301

1302

1303

1305public:

1308 : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

1309

1311

1312private:

1313

1314 struct MemlocStackInfo {

1315

1316

1317 unsigned long StackEpoch;

1318 unsigned long PopEpoch;

1319

1320

1321

1322

1323 unsigned long LowerBound;

1325

1326 unsigned long LastKill;

1327 bool LastKillValid;

1328 };

1329

1330 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,

1333

1335 CachingWalker *Walker;

1338};

1339

1340}

1341

1342

1343

1344

1345

1346

1347

1348

1349

1350

1351

1352

1353

1354void MemorySSA::OptimizeUses::optimizeUsesInBlock(

1355 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,

1358

1359

1361 if (Accesses == nullptr)

1362 return;

1363

1364

1365

1366 while (true) {

1368 !VersionStack.empty() &&

1369 "Version stack should have liveOnEntry sentinel dominating everything");

1370 BasicBlock *BackBlock = VersionStack.back()->getBlock();

1371 if (DT->dominates(BackBlock, BB))

1372 break;

1373 while (VersionStack.back()->getBlock() == BackBlock)

1375 ++PopEpoch;

1376 }

1377

    auto *MU = dyn_cast<MemoryUse>(&MA);

1380 if (!MU) {

1382 ++StackEpoch;

1383 continue;

1384 }

1385

1386 if (MU->isOptimized())

1387 continue;

1388

1389 MemoryLocOrCall UseMLOC(MU);

1390 auto &LocInfo = LocStackInfo[UseMLOC];

1391

1392

1393

1394 if (LocInfo.PopEpoch != PopEpoch) {

1395 LocInfo.PopEpoch = PopEpoch;

1396 LocInfo.StackEpoch = StackEpoch;

1397

1398

1399

1400

1401

1402

1403

1404

1405

1406

1407

1408 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&

1409 !DT->dominates(LocInfo.LowerBoundBlock, BB)) {

1410

1411

1412

1413 LocInfo.LowerBound = 0;

1414 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();

1415 LocInfo.LastKillValid = false;

1416 }

1417 } else if (LocInfo.StackEpoch != StackEpoch) {

1418

1419

1420

1421 LocInfo.PopEpoch = PopEpoch;

1422 LocInfo.StackEpoch = StackEpoch;

1423 }

1424 if (!LocInfo.LastKillValid) {

1425 LocInfo.LastKill = VersionStack.size() - 1;

1426 LocInfo.LastKillValid = true;

1427 }

1428

1429

1430

1431 assert(LocInfo.LowerBound < VersionStack.size() &&

1432 "Lower bound out of range");

1433 assert(LocInfo.LastKill < VersionStack.size() &&

1434 "Last kill info out of range");

1435

1436 unsigned long UpperBound = VersionStack.size() - 1;

1437

1438 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {

1439 LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("

1440 << *(MU->getMemoryInst()) << ")"

1441 << " because there are "

1442 << UpperBound - LocInfo.LowerBound

1443 << " stores to disambiguate\n");

1444

1445

1446 LocInfo.LastKillValid = false;

1447 continue;

1448 }

1449 bool FoundClobberResult = false;

1451 while (UpperBound > LocInfo.LowerBound) {

      if (isa<MemoryPhi>(VersionStack[UpperBound])) {

1453

1454

1455

1458 MU, *AA, UpwardWalkLimit);

1459

1460 while (VersionStack[UpperBound] != Result) {

1461 assert(UpperBound != 0);

1462 --UpperBound;

1463 }

1464 FoundClobberResult = true;

1465 break;

1466 }

1467

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);

1470 FoundClobberResult = true;

1471 break;

1472 }

1473 --UpperBound;

1474 }

1475

1476

1477

1478 if (FoundClobberResult || UpperBound < LocInfo.LastKill) {

1479 MU->setDefiningAccess(VersionStack[UpperBound], true);

1480 LocInfo.LastKill = UpperBound;

1481 } else {

1482

1483

1484 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);

1485 }

1486 LocInfo.LowerBound = VersionStack.size() - 1;

1487 LocInfo.LowerBoundBlock = BB;

1488 }

1489}

1490

1491

1496

1497 unsigned long StackEpoch = 1;

1498 unsigned long PopEpoch = 1;

1499

1501 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,

1502 LocStackInfo);

1503}

1504

1505void MemorySSA::placePHINodes(

1507

1509 IDFs.setDefiningBlocks(DefiningBlocks);

1511 IDFs.calculate(IDFBlocks);

1512

1513

1514 for (auto &BB : IDFBlocks)

1515 createMemoryPhi(BB);

1516}

1517

1518template

1520

1521

1522

1523

1524

1525

1528 nullptr, &StartingPoint, NextID++));

1529

1530

1531

1532

1534

1535

1537 bool InsertIntoDef = false;

1542 if (!MUD)

1543 continue;

1544

1545 if (!Accesses)

1546 Accesses = getOrCreateAccessList(&B);

1547 Accesses->push_back(MUD);

      if (isa<MemoryDef>(MUD)) {

1549 InsertIntoDef = true;

1550 if (!Defs)

1551 Defs = getOrCreateDefsList(&B);

1552 Defs->push_back(*MUD);

1553 }

1554 }

1555 if (InsertIntoDef)

1556 DefiningBlocks.insert(&B);

1557 }

1558 placePHINodes(DefiningBlocks);

1559

1560

1561

1563 if (L) {

1564

1565

1566

1569 U.set(LiveOnEntryDef.get());

1571 }

1572

1573

1576 Visited.insert(ExitBlocks.begin(), ExitBlocks.end());

1578 Visited);

1579 } else {

1581 }

1582

1583

1584

1585 for (auto &BB : Blocks)

1586 if (!Visited.count(&BB))

1587 markUnreachableAsLiveOnEntry(&BB);

1588}
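// Illustrative sketch, not part of the LLVM source: constructing MemorySSA
// directly from existing analysis results runs buildMemorySSA() above. The
// helper name is hypothetical.
#if 0
static void buildAndDump(Function &F, AAResults &AA, DominatorTree &DT) {
  MemorySSA MSSA(F, &AA, &DT); // creates accesses, places phis, renames
  MSSA.print(errs());
}
#endif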


MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }

MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);

  Walker = std::make_unique<CachingWalker>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);

  SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get());
  return SkipWalker.get();
}


// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first, otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}


void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also
    // get an iterator. If we got asked to insert before a use, we have to hunt
    // for the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}


void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

1689

void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}


void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

1715

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  // Phi's always are placed at the front of the block.
  AccessList *Accesses = getOrCreateAccessList(BB);
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  ValueToMemoryAccess[BB] = Phi;
  Accesses->push_front(Phi);
  return Phi;
}


MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template,
                                               bool CreationMustSucceed) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  if (CreationMustSucceed)
    assert(NewAccess != nullptr && "Tried to create a memory access for a "
                                   "non-memory touching instruction");
  if (NewAccess) {
    assert((!Definition || !isa<MemoryUse>(Definition)) &&
           "A use cannot be a defining access");
    NewAccess->setDefiningAccess(Definition);
  }
  return NewAccess;
}


// Return true if the instruction has ordering constraints.
// Note specifically that this only considers stores and loads
// because others are still considered ModRef by getModRefInfo.
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}

1755

1756

template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           AliasAnalysisType *AAP,
                                           const MemoryUseOrDef *Template) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Debuginfo intrinsics may be considered
  // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
  // dependencies here.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {

1769 default:

1770 break;

1771 case Intrinsic::allow_runtime_check:

1772 case Intrinsic::allow_ubsan_check:

1773 case Intrinsic::assume:

1774 case Intrinsic::experimental_noalias_scope_decl:

1775 case Intrinsic::pseudoprobe:

1776 return nullptr;

1777 }

1778 }

1779

1780

1781

1782

  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())

1784 return nullptr;

1785

1790#if !defined(NDEBUG)

1792 bool DefCheck, UseCheck;

1795

1796

1797 assert((Def == DefCheck || !DefCheck) &&

1798 "Memory accesses should only be reduced");

1799 if (!Def && Use != UseCheck) {

1800

1801 assert(!UseCheck && "Invalid template");

1802 }

1803#endif

1804 } else {

1805

1807

1808

1809

1810

1811

1812

1813

1814

1817 }

1818

1819

1820

1821 if (!Def && Use)

1822 return nullptr;

1823

1825 if (Def) {

1826 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);

1827 } else {

1828 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());

1832 }

1833 }

1834 ValueToMemoryAccess[I] = MUD;

1835 return MUD;

1836}

1837


/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary
  if (!isa<MemoryUse>(MA))
    getWalker()->invalidateInfo(MA);

  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

1854

1855 auto VMA = ValueToMemoryAccess.find(MemoryInst);

1856 if (VMA->second == MA)

1857 ValueToMemoryAccess.erase(VMA);

1858}

1859

1860

1861

1862

1863

1864

1865

1868

1869

  if (!isa<MemoryUse>(MA)) {

1871 auto DefsIt = PerBlockDefs.find(BB);

1872 std::unique_ptr &Defs = DefsIt->second;

1873 Defs->remove(*MA);

1874 if (Defs->empty())

1875 PerBlockDefs.erase(DefsIt);

1876 }

1877

1878

1879

1880 auto AccessIt = PerBlockAccesses.find(BB);

1881 std::unique_ptr &Accesses = AccessIt->second;

1882 if (ShouldDelete)

1883 Accesses->erase(MA);

1884 else

1885 Accesses->remove(MA);

1886

1887 if (Accesses->empty()) {

1888 PerBlockAccesses.erase(AccessIt);

1889 BlockNumberingValid.erase(BB);

1890 }

1891}


void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  Function *F = this->F;
  if (L)
    F = L->getHeader()->getParent();
  F->print(OS, &Writer);
}

1900

1901#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

1903#endif

1904

1906#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)

1908#endif

1909

1910#ifndef NDEBUG

1911 if (F) {

1917 } else {

1918 assert(L && "must either have loop or function");

1921 return *const_cast<BasicBlock *>(BB);

1922 });

1927 }

1928#endif

1929

1930

1931

1932

1933

1934

1935

1936

1937

1938

1939}

1940

1941template

1945 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {

1946 auto *Pred = Phi->getIncomingBlock(I);

1947 auto *IncAcc = Phi->getIncomingValue(I);

1948

1949

1950

1951 if (auto *DTNode = DT->getNode(Pred)) {

1952 while (DTNode) {

1953 if (auto *DefList = getBlockDefs(DTNode->getBlock())) {

1954 auto *LastAcc = &*(--DefList->end());

1955 assert(LastAcc == IncAcc &&

1956 "Incorrect incoming access into phi.");

1957 (void)IncAcc;

1958 (void)LastAcc;

1959 break;

1960 }

1961 DTNode = DTNode->getIDom();

1962 }

1963 } else {

1964

1965

1966

1967

1968

1969

1970 }

1971 }

1972 }

1973 }

1974}

1975

1976

1977

1978template

1980 if (BlockNumberingValid.empty())

1981 return;

1982

1985 if (!ValidBlocks.count(&BB))

1986 continue;

1987

1988 ValidBlocks.erase(&BB);

1989

1991

1992 if (!Accesses)

1993 continue;

1994

1995

1996 unsigned long LastNumber = 0;

1998 auto ThisNumberIter = BlockNumbering.find(&MA);

1999 assert(ThisNumberIter != BlockNumbering.end() &&

2000 "MemoryAccess has no domination number in a valid block!");

2001

2002 unsigned long ThisNumber = ThisNumberIter->second;

2003 assert(ThisNumber > LastNumber &&

2004 "Domination numbers should be strictly increasing!");

2005 (void)LastNumber;

2006 LastNumber = ThisNumber;

2007 }

2008 }

2009

2011 "All valid BasicBlocks should exist in F -- dangling pointers?");

2012}

2013

2014

2015

2016

2017

2018

2019

2020template

2023

2024

2025

2032 if (Phi) {

2033

2036

2037 for (const Use &U : Phi->uses()) {

2038 assert(dominates(Phi, U) && "Memory PHI does not dominate it's uses");

2039 (void)U;

2040 }

2041

2044 "Incomplete MemoryPhi Node");

2045 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {

2046 verifyUseInDefs(Phi->getIncomingValue(I), Phi);

2048 "Incoming phi block not a block predecessor");

2049 }

2050 }

2051 }

2052

2055 assert((!MA || (AL && (isa(MA) || DL))) &&

2056 "We have memory affecting instructions "

2057 "in this block but they are not in the "

2058 "access list or defs list");

2059 if (MA) {

2060

2062 if (MemoryAccess *MD = dyn_cast(MA)) {

2063

2065

2066 for (const Use &U : MD->uses()) {

2068 "Memory Def does not dominate it's uses");

2069 (void)U;

2070 }

2071 }

2072

2075 }

2076 }

2077

2078

2079 if (!AL && DL)

2080 continue;

2081

2082 assert(AL->size() == ActualAccesses.size() &&

2083 "We don't have the same number of accesses in the block as on the "

2084 "access list");

2086 "Either we should have a defs list, or we should have no defs");

2088 "We don't have the same number of defs in the block as on the "

2089 "def list");

2090 auto ALI = AL->begin();

2091 auto AAI = ActualAccesses.begin();

2092 while (ALI != AL->end() && AAI != ActualAccesses.end()) {

2093 assert(&*ALI == *AAI && "Not the same accesses in the same order");

2094 ++ALI;

2095 ++AAI;

2096 }

2097 ActualAccesses.clear();

2098 if (DL) {

2100 auto ADI = ActualDefs.begin();

2101 while (DLI != DL->end() && ADI != ActualDefs.end()) {

2102 assert(&*DLI == *ADI && "Not the same defs in the same order");

2103 ++DLI;

2104 ++ADI;

2105 }

2106 }

2107 ActualDefs.clear();

2108 }

2109}

2110

2111

2112

2114

2115 if (!Def)

2117 "Null def but use not point to live on entry def");

2118 else

2120 "Did not find use in def's use list");

2121}

2122

/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
/// TODO: We currently just number in order. If we numbered by N, we could
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
/// log2(N) sequences of mixed before and after) without needing to invalidate
/// the numbering.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}

2138

2139

2140

2141

2145

2147 "Asking for local domination when accesses are in different blocks!");

2148

2149 if (Dominatee == Dominator)

2150 return true;

2151

2152

2153

2155 return false;

2156

2157

2158

2160 return true;

2161

2162 if (!BlockNumberingValid.count(DominatorBlock))

2163 renumberBlock(DominatorBlock);

2164

2165 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);

2166

2167 assert(DominatorNum != 0 && "Block was not numbered properly");

2168 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);

2169 assert(DominateeNum != 0 && "Block was not numbered properly");

2170 return DominatorNum < DominateeNum;

2171}

2172

2175 if (Dominator == Dominatee)

2176 return true;

2177

2179 return false;

2180

2184}

2185

2187 const Use &Dominatee) const {

  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {

2189 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);

2190

2191 if (UseBB != Dominator->getBlock())

2193

2194 return locallyDominates(Dominator, cast(Dominatee));

2195 }

2196

2197 return dominates(Dominator, cast(Dominatee.getUser()));

2198}

2199

2201 if (IsOptimized)

2202 return;

2203

2208 IsOptimized = true;

2209}

2210

2212 switch (getValueID()) {

2213 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);

2214 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);

2215 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);

2216 }

2218}

2219

2222

2224 if (A && A->getID())

2225 OS << A->getID();

2226 else

2228 };

2229

2230 OS << getID() << " = MemoryDef(";

2231 printID(UO);

2232 OS << ")";

2233

2234 if (isOptimized()) {

2235 OS << "->";

2236 printID(getOptimized());

2237 }

2238}

2239

2241 ListSeparator LS(",");

2242 OS << getID() << " = MemoryPhi(";

2243 for (const auto &Op : operands()) {

2246

2247 OS << LS << '{';

2250 else

2252 OS << ',';

2253 if (unsigned ID = MA->getID())

2255 else

2257 OS << '}';

2258 }

2259 OS << ')';

2260}

2261

2264 OS << "MemoryUse(";

2265 if (UO && UO->getID())

2267 else

2269 OS << ')';

2270}

2271

2273

2274#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

2276 dbgs() << "\n";

2277#endif

2278}

2279

2281private:

2283 MemorySSAAnnotatedWriter MSSAWriter;

2284

2285public:

2287 : F(F), MSSAWriter(&MSSA) {}

2288

2290 MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }

2291};

2292

2293namespace llvm {

2294

2295template <>

2299 }

2300

2301

2303

2306 }

2307

2310 }

2311

2314 }

2315};

2316

2317template <>

2319

2321

2324 "' function";

2325 }

2326

2329 Node, nullptr,

2332 },

2333 [](std::string &S, unsigned &I, unsigned Idx) -> void {

2334 std::string Str = S.substr(I, Idx - I);

2336 if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||

2337 SR.count("MemoryUse("))

2338 return;

2340 });

2341 }

2342

2346 }

2347

2348

2351 return "";

2352 }

2353

    return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos

2357 ? "style=filled, fillcolor=lightpink"

2358 : "";

2359 }

2360};

2361

2362}

2363

2365

2371}

2372

2380}

2381

2385 if (EnsureOptimizedUses)

2390 } else {

2391 OS << "MemorySSA for function: " << F.getName() << "\n";

2393 }

2394

2396}

2397

2401 OS << "MemorySSA (walker) for function: " << F.getName() << "\n";

2402 MemorySSAWalkerAnnotatedWriter Writer(&MSSA);

2403 F.print(OS, &Writer);

2404

2406}

2407

2411

2413}

2414

2416

2419}

2420

2422

2427}

2428

2430 auto &DT = getAnalysis().getDomTree();

2431 auto &AA = getAnalysis().getAAResults();

2432 MSSA.reset(new MemorySSA(F, &AA, &DT));

2433 return false;

2434}

2435

2439}

2440

2443}

2444

2446

2447

2448

2449

2450

2454 assert(!isa(StartingAccess) && "Use cannot be defining access");

2455

2456

2457 if (Loc.Ptr == nullptr)

2458 return StartingAccess;

2459

  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {

2463 return StartingUseOrDef;

2464

2465 I = StartingUseOrDef->getMemoryInst();

2466

2467

2468

    if (!isa<CallBase>(I) && I->isFenceLike())

2470 return StartingUseOrDef;

2471 }

2472

2473 UpwardsMemoryQuery Q;

2474 Q.OriginalAccess = StartingAccess;

2475 Q.StartingLoc = Loc;

2476 Q.Inst = nullptr;

2477 Q.IsCall = false;

2478

2479

2480

2481

2483 Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);

2485 dbgs() << "Clobber starting at access " << *StartingAccess << "\n";

2486 if (I)

2487 dbgs() << " for instruction " << *I << "\n";

2488 dbgs() << " is " << *Clobber << "\n";

2489 });

2490 return Clobber;

2491}

2492

2495 if (I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())

2496 return nullptr;

2497

2498

2499

2500

2502

2503

2504

2505

2506

  if (isa<Constant>(PointerOperand))

2508 return nullptr;

2509

2510 const Instruction *MostDominatingInstruction = &I;

2511

2512 for (const User *Us : PointerOperand->users()) {

    auto *U = dyn_cast<Instruction>(Us);

2514 if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))

2515 continue;

2516

2517

2518

2519

2520 if (U->hasMetadata(LLVMContext::MD_invariant_group) &&

2522 MostDominatingInstruction = U;

2523 }

2524 }

2525

2526 return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;

2527}

2528

2531 bool SkipSelf, bool UseInvariantGroup) {

  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);

2533

2534 if (!StartingAccess)

2535 return MA;

2536

2537 if (UseInvariantGroup) {

2539 *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {

      assert(isa<LoadInst>(I) || isa<StoreInst>(I));

2541

      if (isa<MemoryUse>(ClobberMA))

2545 return ClobberMA->getDefiningAccess();

2546 return ClobberMA;

2547 }

2548 }

2549

2550 bool IsOptimized = false;

2551

2552

2553

2554

2555 if (StartingAccess->isOptimized()) {

    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))

2557 return StartingAccess->getOptimized();

2558 IsOptimized = true;

2559 }

2560

2561 const Instruction *I = StartingAccess->getMemoryInst();

2562

2563

2564

  if (!isa<CallBase>(I) && I->isFenceLike())

2566 return StartingAccess;

2567

2568 UpwardsMemoryQuery Q(I, StartingAccess);

2569

2572 StartingAccess->setOptimized(LiveOnEntry);

2573 return LiveOnEntry;

2574 }

2575

2577 if (!IsOptimized) {

2578

2579 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

2580

2581

2582

2584 StartingAccess->setOptimized(DefiningAccess);

2585 return DefiningAccess;

2586 }

2587

2588 OptimizedAccess =

2589 Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);

2590 StartingAccess->setOptimized(OptimizedAccess);

2591 } else

2592 OptimizedAccess = StartingAccess->getOptimized();

2593

2594 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");

2596 LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");

2598

  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {

    assert(isa<MemoryDef>(Q.OriginalAccess));

2603 Q.SkipSelfAccess = true;

2604 Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);

2605 } else

2606 Result = OptimizedAccess;

2607

2608 LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);

2609 LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

2610

2611 return Result;

2612}


MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA,
                                                    BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

2628

2629void MemoryPhi::deleteMe(DerivedUser *Self) {

2630 delete static_cast<MemoryPhi *>(Self);

2631}

2632

2633void MemoryDef::deleteMe(DerivedUser *Self) {

2634 delete static_cast<MemoryDef *>(Self);

2635}

2636

2637void MemoryUse::deleteMe(DerivedUser *Self) {

2638 delete static_cast<MemoryUse *>(Self);

2639}

2640

bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {
  auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
    Ptr = Ptr->stripPointerCasts();
    if (!isa<Instruction>(Ptr))
      return true;
    return isa<AllocaInst>(Ptr);
  };

  Ptr = Ptr->stripPointerCasts();
  if (auto *I = dyn_cast<Instruction>(Ptr)) {
    if (I->getParent()->isEntryBlock())
      return true;
  }
  if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
           GEP->hasAllConstantIndices();
  }
  return IsGuaranteedLoopInvariantBase(Ptr);
}
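// Illustrative sketch, not part of the LLVM source: querying MemorySSA from a
// new-pass-manager pass. The pass name is hypothetical; the APIs used
// (MemorySSAAnalysis, getMemoryAccess, getClobberingMemoryAccess) are the
// ones declared for this file.
#if 0
struct ClobberWalkPass : PassInfoMixin<ClobberWalkPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM) {
    MemorySSA &MSSA = FAM.getResult<MemorySSAAnalysis>(F).getMSSA();
    MemorySSAWalker *Walker = MSSA.getWalker();
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I))
          (void)Walker->getClobberingMemoryAccess(MA); // result cached on MA
    return PreservedAnalyses::all();
  }
};
#endif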

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

Atomic ordering constants.

BlockVerifier::State From

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

#define LLVM_DUMP_METHOD

Mark debug helper function definitions like dump() that should not be stripped from debug builds.

#define LLVM_ATTRIBUTE_UNUSED

Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx

This file defines DenseMapInfo traits for DenseMap.

This file defines the DenseMap class.

This file defines the DenseSet and SmallDenseSet classes.

This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.

std::optional< std::vector< StOtherPiece > > Other

DenseMap< Block *, BlockRelaxAux > Blocks

This header defines various interfaces for pass management in LLVM.

This defines the Use class.

This file provides utility analysis objects describing memory locations.

static bool instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc, const Instruction *UseInst, AliasAnalysisType &AA)

Memory static true cl::opt< unsigned > MaxCheckLimit("memssa-check-limit", cl::Hidden, cl::init(100), cl::desc("The maximum number of stores/phis MemorySSA" "will consider trying to walk past (default = 100)"))

static cl::opt< bool, true > VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA), cl::Hidden, cl::desc("Enable verification of MemorySSA."))

static const char LiveOnEntryStr[]

static LLVM_ATTRIBUTE_UNUSED void checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, const MemoryLocation &StartLoc, const MemorySSA &MSSA, const UpwardsMemoryQuery &Query, BatchAAResults &AA, bool AllowImpreciseClobber=false)

Verifies that Start is clobbered by ClobberAt, and that nothing inbetween Start and ClobberAt can clo...

static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA, const Instruction *I)

static bool areLoadsReorderable(const LoadInst *Use, const LoadInst *MayClobber)

This does one-way checks to see if Use could theoretically be hoisted above MayClobber.

static const Instruction * getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT)

static cl::opt< std::string > DotCFGMSSA("dot-cfg-mssa", cl::value_desc("file name for generated dot file"), cl::desc("file name for generated dot file"), cl::init(""))

static bool isOrdered(const Instruction *I)

This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...

static std::string getNodeLabel(const ValueInfo &VI, GlobalValueSummary *GVS)

uint64_t IntrinsicInst * II

#define INITIALIZE_PASS_DEPENDENCY(depName)

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)

assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())

This file defines the SmallPtrSet class.

This file defines the SmallVector class.

DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)

const Function * getFunction()

MemorySSAAnnotatedWriter & getWriter()

A manager for alias analyses.

A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.

This templated class represents "all analyses that operate over " (e....

API to communicate dependencies between analyses during invalidation.

bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)

Trigger the invalidation of some other analysis pass if not already handled and return whether it was...

A container for analyses that lazily runs them and caches their results.

PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)

Get the result of an analysis pass for a given IR unit.

Represent the analysis usage information of a pass.

void setPreservesAll()

Set by analyses that do not transform their input at all.

AnalysisUsage & addRequiredTransitive()

virtual void emitBasicBlockStartAnnot(const BasicBlock *, formatted_raw_ostream &)

emitBasicBlockStartAnnot - This may be implemented to emit a string right after the basic block label...

virtual void emitInstructionAnnot(const Instruction *, formatted_raw_ostream &)

emitInstructionAnnot - This may be implemented to emit a string right before an instruction is emitte...

LLVM Basic Block Representation.

void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW=nullptr, bool ShouldPreserveUseListOrder=false, bool IsForDebug=false) const

Print the basic block to an output stream with an optional AssemblyAnnotationWriter.

const Function * getParent() const

Return the enclosing method, or null if none.

LLVMContext & getContext() const

Get the context in which this basic block lives.

This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

This class represents an Operation in the Expression.

iterator find(const_arg_type_t< KeyT > Val)

bool erase(const KeyT &Val)

std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)

Implements a dense probed hash-table based set.

Extension point for the Value hierarchy.

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *, BatchAAResults &) override

Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...

typename SmallVector< DomTreeNodeBase *, 4 >::const_iterator const_iterator

Analysis pass which computes a DominatorTree.

DomTreeNodeBase< NodeT > * getRootNode()

getRootNode - This returns the entry node for the CFG of the function.

DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const

getNode - return the (Post)DominatorTree node for the specified basic block.

Legacy analysis pass which computes a DominatorTree.

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

bool isReachableFromEntry(const Use &U) const

Provide an overload for a Use.

bool dominates(const BasicBlock *BB, const Use &U) const

Return true if the (end of the) basic block BB dominates the use U.

FunctionPass class - This class is used to implement most global optimizations.

const BasicBlock & getEntryBlock() const

A wrapper class for inspecting calls to intrinsic functions.

An instruction for reading from memory.

bool isVolatile() const

Return true if this is a load from a volatile memory location.

AtomicOrdering getOrdering() const

Returns the ordering constraint of this load instruction.

void getExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const

Return all of the successor blocks of this loop.

BlockT * getHeader() const

iterator_range< block_iterator > blocks() const

BlockT * getLoopPreheader() const

If there is a preheader for this loop, return it.

Represents a single loop in the control flow graph.

BasicBlock * getBlock() const

void print(raw_ostream &OS) const

unsigned getID() const

Used for debugging and tracking things about MemoryAccesses.

void setBlock(BasicBlock *BB)

Used by MemorySSA to change the block of a MemoryAccess when it is moved.

Represents a read-write access to memory, whether it is a must-alias, or a may-alias.

void print(raw_ostream &OS) const

Representation for a specific memory location.

static MemoryLocation get(const LoadInst *LI)

Return a location with information about the memory reference by the given instruction.

const Value * Ptr

The address of the start of the location.

Represents phi nodes for memory accesses.

void print(raw_ostream &OS) const

An analysis that produces MemorySSA for a function.

Result run(Function &F, FunctionAnalysisManager &AM)

PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)

static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, AliasAnalysis &AA)

PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)

This is the generic walker interface for walkers of MemorySSA.

MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)

Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...

MemorySSAWalker(MemorySSA *)

virtual void invalidateInfo(MemoryAccess *)

Given a memory access, invalidate anything this walker knows about that access.

Legacy analysis pass which computes MemorySSA.

void verifyAnalysis() const override

verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...

void releaseMemory() override

releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...

bool runOnFunction(Function &) override

runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...

void print(raw_ostream &OS, const Module *M=nullptr) const override

print - Print out the internal state of the pass.

A MemorySSAWalker that does AA walks to disambiguate accesses.

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA) override

Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...

MemoryAccess * getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL)

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA, unsigned &UWL)

void invalidateInfo(MemoryAccess *MA) override

Given a memory access, invalidate anything this walker knows about that access.

~CachingWalker() override=default

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL)

CachingWalker(MemorySSA *M, ClobberWalkerBase *W)

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA) override

Given a potentially clobbering memory access and a new location, calling this will give you the neare...

ClobberWalkerBase(MemorySSA *M, DominatorTree *D)

MemoryAccess * getClobberingMemoryAccessBase(MemoryAccess *, const MemoryLocation &, BatchAAResults &, unsigned &)

Walk the use-def chains starting at StartingAccess and find the MemoryAccess that actually clobbers L...

This class is a batch walker of all MemoryUse's in the program, and points their defining access at t...

void optimizeUses()

Optimize uses to point to their actual clobbering definitions.

OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA, DominatorTree *DT)

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA) override

Given a potentially clobbering memory access and a new location, calling this will give you the neare...

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA, unsigned &UWL)

~SkipSelfWalker() override=default

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL)

SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)

MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA) override

Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...

void invalidateInfo(MemoryAccess *MA) override

Given a memory access, invalidate anything this walker knows about that access.

Encapsulates MemorySSA, including all data associated with memory accesses.

void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where)

simple_ilist< MemoryAccess, ilist_tag< MSSAHelpers::DefsOnlyTag > > DefsList

const AccessList * getBlockAccesses(const BasicBlock *BB) const

Return the list of MemoryAccess's for a given basic block.

void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock * > &Visited)

MemorySSAWalker * getSkipSelfWalker()

MemorySSA(Function &, AliasAnalysis *, DominatorTree *)

bool dominates(const MemoryAccess *A, const MemoryAccess *B) const

Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...

void verifyOrderingDominationAndDefUses(IterT Blocks, VerificationLevel=VerificationLevel::Fast) const

Verify ordering: the order and existence of MemoryAccesses matches the order and existence of memory ...

AccessList * getWritableBlockAccesses(const BasicBlock *BB) const

void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const

Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...

void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, InsertionPlace)

MemorySSAWalker * getWalker()

InsertionPlace

Used in various insertion functions to specify whether we are talking about the beginning or end of a...

void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, AccessList::iterator)

MemoryUseOrDef * createDefinedAccess(Instruction *, MemoryAccess *, const MemoryUseOrDef *Template=nullptr, bool CreationMustSucceed=true)

DefsList * getWritableBlockDefs(const BasicBlock *BB) const

DominatorTree & getDomTree() const

MemoryUseOrDef * getMemoryAccess(const Instruction *I) const

Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.

MemoryAccess * getLiveOnEntryDef() const

void removeFromLookups(MemoryAccess *)

Properly remove MA from all of MemorySSA's lookup tables.

void ensureOptimizedUses()

By default, uses are not optimized during MemorySSA construction.

void verifyDominationNumbers(IterT Blocks) const

Verify that all of the blocks we believe to have valid domination numbers actually have valid dominat...

void verifyPrevDefInPhis(IterT Blocks) const

const DefsList * getBlockDefs(const BasicBlock *BB) const

Return the list of MemoryDef's and MemoryPhi's for a given basic block.

bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const

Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...

void removeFromLists(MemoryAccess *, bool ShouldDelete=true)

Properly remove MA from all of MemorySSA's lists.

bool isLiveOnEntryDef(const MemoryAccess *MA) const

Return true if MA represents the live on entry value.

iplist< MemoryAccess, ilist_tag< MSSAHelpers::AllAccessTag > > AccessList

void print(raw_ostream &) const

Class that has the common methods + fields of memory uses/defs.

MemoryAccess * getDefiningAccess() const

Get the access that produces the memory state used by this Use.
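
A one-hop sketch up the memory def-use graph; MUD is a hypothetical MemoryUseOrDef pointer:

  llvm::MemoryAccess *Def = MUD->getDefiningAccess();
  // Def is the MemoryDef, MemoryPhi, or liveOnEntry def whose
  // state this access reads.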

void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false)

void setOptimized(MemoryAccess *)

Sets the optimized use for a MemoryDef.

Instruction * getMemoryInst() const

Get the instruction that this MemoryUse represents.

Represents read-only accesses to memory.

void print(raw_ostream &OS) const

A Module instance is used to store all the information related to an LLVM module.

static PassRegistry * getPassRegistry()

getPassRegistry - Access the global registry object, which is automatically initialized at application-load time.

A set of analyses that are preserved following a run of a transformation pass.

static PreservedAnalyses all()

Construct a special preserved set that preserves all passes.

PreservedAnalysisChecker getChecker() const

Build a checker for this PreservedAnalyses and the specified analysis type.

A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.

bool erase(PtrType Ptr)

Remove pointer from the set.

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
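
A sketch of the visited-set idiom these members are built for; BB is a hypothetical block pointer:

  #include "llvm/ADT/SmallPtrSet.h"

  llvm::SmallPtrSet<const llvm::BasicBlock *, 8> Visited;
  if (Visited.insert(BB).second) {
    // First time BB is seen; insert().second is false on repeats.
  }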

This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.

reference emplace_back(ArgTypes &&... Args)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.

std::string str() const

str - Get the contents as an std::string.

size_t count(char C) const

Return the number of occurrences of C in the string.

Target - Wrapper for Target specific information.

A Use represents the edge between a Value definition and its users.

User * getUser() const

Returns the User that contains this Use.

void dropAllReferences()

Drop all references to operands.

LLVM Value Representation.

iterator_range< user_iterator > users()

void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const

Print the name of this Value out to the specified raw_ostream.

const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

iterator_range< use_iterator > uses()

StringRef getName() const

Return a constant reference to the value's name.

std::pair< iterator, bool > insert(const ValueT &V)

formatted_raw_ostream - A raw_ostream that wraps another one and keeps track of line and column position, allowing padding out to specific column boundaries and querying the number of lines written to the stream.

An opaque object representing a hash code.

base_list_type::iterator iterator

An intrusive list with ownership and callbacks specified/controlled by ilist_traits, only with API safe for polymorphic types.

CRTP base class which implements the entire standard iterator facade in terms of a minimal subset of the interface.

A range adaptor for a pair of iterators.

This class implements an extremely fast bulk output stream that can only output to a stream.

A raw_ostream that writes to an std::string.

A simple intrusive list implementation.

reverse_iterator rbegin()

This class provides various memory handling functions that manipulate MemoryBlock instances.

This provides a very simple, boring adaptor for a begin and end iterator into a range type.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

@ C

The default llvm calling convention, compatible with C.

unsigned ID

LLVM IR allows the use of arbitrary numbers as calling convention identifiers.

initializer< Ty > init(const Ty &Val)

LocationClass< Ty > location(Ty &L)

NodeAddr< PhiNode * > Phi

NodeAddr< DefNode * > Def


bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

void initializeMemorySSAWrapperPassPass(PassRegistry &)

APInt operator*(APInt a, uint64_t RHS)

auto successors(const MachineBasicBlock *BB)

const Value * getLoadStorePointerOperand(const Value *V)

A helper function that returns the pointer operand of a load or store instruction.
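
A sketch pairing this helper with stripPointerCasts (listed above); I is a hypothetical instruction:

  if (const llvm::Value *Ptr = llvm::getLoadStorePointerOperand(&I)) {
    const llvm::Value *Base = Ptr->stripPointerCasts();
    // Base looks through no-op casts, all-zero GEPs, and
    // address-space casts.
  }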

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)

Implements IEEE 754-2019 maximum semantics.

iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)

Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.

auto pred_size(const MachineBasicBlock *BB)

raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")

upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT)

bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)

auto map_range(ContainerTy &&C, FuncTy F)

iterator_range< def_chain_iterator< T > > def_chain(T MA, MemoryAccess *UpTo=nullptr)
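
A sketch walking upward from a hypothetical access MA; the chain visits MA and each defining access in turn, ending after a MemoryPhi or the liveOnEntry def (or at UpTo when one is given):

  for (llvm::MemoryAccess *Cur : llvm::def_chain(MA))
    llvm::errs() << *Cur << "\n";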

bool isModSet(const ModRefInfo MRI)

auto find_if_not(R &&Range, UnaryPredicate P)

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)

bool isModOrRefSet(const ModRefInfo MRI)

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

ModRefInfo

Flags indicating whether a memory access modifies or references memory.

@ ModRef

The access may reference and may modify the value stored in memory.

@ First

Helpers to iterate all locations in the MemoryEffectsBase class.

bool VerifyMemorySSA

Enables verification of MemorySSA.

std::pair< MemoryAccess *, MemoryLocation > MemoryAccessPair

upward_defs_iterator upward_defs_end()

auto find_if(R &&Range, UnaryPredicate P)

Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.

auto predecessors(const MachineBasicBlock *BB)

bool is_contained(R &&Range, const E &Element)

Returns true if Element is found in Range.

iterator_range< df_iterator< T > > depth_first(const T &G)

hash_code hash_combine(const Ts &...args)

Combine values into a single hash_code.
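
A sketch of the idiom getHashValue above relies on: fold several fields into one opaque hash_code. The field names are hypothetical:

  #include "llvm/ADT/Hashing.h"

  llvm::hash_code H = llvm::hash_combine(IsCall, PtrField, SizeField);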

bool isRefSet(const ModRefInfo MRI)

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

A special type used by analysis passes to provide an address that identifies that particular analysis pass type.

std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I, DOTFuncMSSAInfo *CFGInfo)

Display the raw branch weights from PGO.

std::string getNodeAttributes(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo)

static std::string getEdgeSourceLabel(const BasicBlock *Node, const_succ_iterator I)

std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo)

static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo)

DOTGraphTraits(bool IsSimple=false)

DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot' graphs.

Description of the encoding of one expression Op.

DefaultDOTGraphTraits - This class provides the default implementations of all of the DOTGraphTraits functions.

static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS)

static unsigned getHashValue(const MemoryLocOrCall &MLOC)

static MemoryLocOrCall getEmptyKey()

static MemoryLocOrCall getTombstoneKey()

An information struct used to provide DenseMap with the various necessary components for a given value type T.

static size_t size(DOTFuncMSSAInfo *CFGInfo)

static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo)

static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo)

static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo)

bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)

PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)