LLVM: lib/Transforms/Scalar/GVN.cpp
//===- GVN.cpp - Eliminate redundant values and loads ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad, "Number of loads PRE'd");
STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
STATISTIC(NumPRELoadMoved2CEPred,
          "Number of loads moved to predecessor of a critical edge in PRE");

STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
          "Number of blocks speculated as available in "
          "IsValueFullyAvailableInBlock(), max");
STATISTIC(MaxBBSpeculationCutoffReachedTimes,
          "Number of times we reached gvn-max-block-speculations cut-off "
          "preventing further exploration");

static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
                                            cl::init(true));
static cl::opt<bool> GVNEnableSplitBackedgeInLoadPRE(
    "enable-split-backedge-in-load-pre", cl::init(false));
static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
static cl::opt<bool> GVNEnableMemorySSA("enable-gvn-memoryssa",
                                        cl::init(false));

static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100),
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

// This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
static cl::opt<uint32_t> MaxBBSpeculations(
    "gvn-max-block-speculations", cl::Hidden, cl::init(600),
    cl::desc("Max number of blocks we're willing to speculate on (and recurse "
             "into) when deducing if a value is fully available or not in GVN "
             "(default = 600)"));

static cl::opt<uint32_t> MaxNumVisitedInsts(
    "gvn-max-num-visited-insts", cl::Hidden, cl::init(100),
    cl::desc("Max number of visited instructions when trying to find "
             "dominating value of select dependency (default = 100)"));

static cl::opt<uint32_t> MaxNumInsnsPerBlock(
    "gvn-max-num-insns", cl::Hidden, cl::init(100),
    cl::desc("Max number of instructions to scan in each basic block in GVN "
             "(default = 100)"));

/// A GVN expression: an opcode plus the value numbers of the operands, used
/// as the hash key when numbering instructions.
struct llvm::GVNPass::Expression {
  uint32_t Opcode;
  bool Commutative = false;
  // The type is not necessarily the result type of the expression, it may be
  // any additional type needed to disambiguate the expression.
  Type *Ty = nullptr;
  SmallVector<uint32_t, 4> VarArgs;
  AttributeList Attrs;

  Expression(uint32_t O = ~2U) : Opcode(O) {}

  bool operator==(const Expression &Other) const {
    if (Opcode != Other.Opcode)
      return false;
    if (Opcode == ~0U || Opcode == ~1U)
      return true;
    if (Ty != Other.Ty)
      return false;
    if (VarArgs != Other.VarArgs)
      return false;
    if ((!Attrs.isEmpty() || !Other.Attrs.isEmpty()) &&
        !Attrs.intersectWith(Ty->getContext(), Other.Attrs).has_value())
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.Opcode, Value.Ty,
        hash_combine_range(Value.VarArgs.begin(), Value.VarArgs.end()));
  }
};

namespace llvm {

template <> struct DenseMapInfo<GVNPass::Expression> {
  static inline GVNPass::Expression getEmptyKey() { return ~0U; }
  static inline GVNPass::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVNPass::Expression &E) {
    using llvm::hash_value;
    return static_cast<unsigned>(hash_value(E));
  }

  static bool isEqual(const GVNPass::Expression &LHS,
                      const GVNPass::Expression &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails.  An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum class ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal,   // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal,  // An UndefValue representing a value from a dead block (which
               // is not yet physically removed from the CFG).
    SelectVal, // A pointer select which is loaded from and for which the load
               // can be replaced by a value select.
  };

  /// Val - The value that is live out of the block.
  Value *Val;
  /// Kind of the live-out value.
  ValType Kind;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset = 0;
  /// V1, V2 - The dominating non-clobbered values of SelectVal.
  Value *V1 = nullptr, *V2 = nullptr;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = V;
    Res.Kind = ValType::SimpleVal;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = MI;
    Res.Kind = ValType::MemIntrin;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = Load;
    Res.Kind = ValType::LoadVal;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val = nullptr;
    Res.Kind = ValType::UndefVal;
    Res.Offset = 0;
    return Res;
  }

  static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2) {
    AvailableValue Res;
    Res.Val = Sel;
    Res.Kind = ValType::SelectVal;
    Res.Offset = 0;
    Res.V1 = V1;
    Res.V2 = V2;
    return Res;
  }

  bool isSimpleValue() const { return Kind == ValType::SimpleVal; }

  bool isCoercedLoadValue() const { return Kind == ValType::LoadVal; }

  bool isMemIntrinValue() const { return Kind == ValType::MemIntrin; }

  bool isUndefValue() const { return Kind == ValType::UndefVal; }

  bool isSelectValue() const { return Kind == ValType::SelectVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val;
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val);
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val);
  }

  SelectInst *getSelectValue() const {
    assert(isSelectValue() && "Wrong accessor");
    return cast<SelectInst>(Val);
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB = nullptr;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel,
                                         Value *V1, Value *V2) {
    return get(BB, AvailableValue::getSelect(Sel, V1, V2));
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load) const {
    return AV.MaterializeAdjustedValue(Load, BB->getTerminator());
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
  Expression E;
  E.Ty = I->getType();
  E.Opcode = I->getOpcode();
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
    // gc.relocate is 'special' call: its second and third operands are
    // not real values, but indices into statepoint's argument list.
    // Use the referred to values for purposes of identity.
    E.VarArgs.push_back(lookupOrAdd(GCR->getOperand(0)));
    E.VarArgs.push_back(lookupOrAdd(GCR->getBasePtr()));
    E.VarArgs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
  } else {
    for (Use &Op : I->operands())
      E.VarArgs.push_back(lookupOrAdd(Op));
  }
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand
    // value numbers.  Since commutative operands are the 1st two operands it
    // is more efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
    if (E.VarArgs[0] > E.VarArgs[1])
      std::swap(E.VarArgs[0], E.VarArgs[1]);
    E.Commutative = true;
  }

  if (auto *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (E.VarArgs[0] > E.VarArgs[1]) {
      std::swap(E.VarArgs[0], E.VarArgs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    E.Opcode = (C->getOpcode() << 8) | Predicate;
    E.Commutative = true;
  } else if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    E.VarArgs.append(IVI->idx_begin(), IVI->idx_end());
  } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
    ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
    E.VarArgs.append(ShuffleMask.begin(), ShuffleMask.end());
  } else if (auto *CB = dyn_cast<CallBase>(I)) {
    E.Attrs = CB->getAttributes();
  }

  return E;
}
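
// Illustration: because the operand value numbers of commutative operations
// are sorted before hashing, the two instructions
//
//   %x = add i32 %a, %b
//   %y = add i32 %b, %a
//
// produce identical Expressions and therefore receive the same value number,
// which later allows GVN to replace %y with %x.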

GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
    unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression E;
  E.Ty = CmpInst::makeCmpResultType(LHS->getType());
  E.VarArgs.push_back(lookupOrAdd(LHS));
  E.VarArgs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (E.VarArgs[0] > E.VarArgs[1]) {
    std::swap(E.VarArgs[0], E.VarArgs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  E.Opcode = (Opcode << 8) | Predicate;
  E.Commutative = true;
  return E;
}

GVNPass::Expression
GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression E;
  E.Ty = EI->getType();
  E.Opcode = 0;

  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI is an extract from one of our with.overflow intrinsics. Synthesize
    // a semantically equivalent expression instead of an extract value
    // expression.
    E.Opcode = WO->getBinaryOp();
    E.VarArgs.push_back(lookupOrAdd(WO->getLHS()));
    E.VarArgs.push_back(lookupOrAdd(WO->getRHS()));
    return E;
  }

  // Not a recognised intrinsic. Fall back to producing an extract value
  // expression.
  E.Opcode = EI->getOpcode();
  for (Use &Op : EI->operands())
    E.VarArgs.push_back(lookupOrAdd(Op));

  E.VarArgs.append(EI->idx_begin(), EI->idx_end());

  return E;
}

GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {
  Expression E;
  Type *PtrTy = GEP->getType()->getScalarType();
  const DataLayout &DL = GEP->getDataLayout();
  unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);
  SmallMapVector<Value *, APInt, 4> VariableOffsets;
  APInt ConstantOffset(BitWidth, 0);
  if (GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
    // Convert into offset representation, to recognize equivalent address
    // calculations that use different type encoding.
    LLVMContext &Context = GEP->getContext();
    E.Opcode = GEP->getOpcode();
    E.Ty = nullptr;
    E.VarArgs.push_back(lookupOrAdd(GEP->getPointerOperand()));
    for (const auto &[V, Scale] : VariableOffsets) {
      E.VarArgs.push_back(lookupOrAdd(V));
      E.VarArgs.push_back(lookupOrAdd(ConstantInt::get(Context, Scale)));
    }
    if (!ConstantOffset.isZero())
      E.VarArgs.push_back(
          lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
  } else {
    // If converting to offset representation fails (for scalable vectors),
    // fall back to type-based implementation.
    E.Opcode = GEP->getOpcode();
    E.Ty = GEP->getSourceElementType();
    for (Use &Op : GEP->operands())
      E.VarArgs.push_back(lookupOrAdd(Op));
  }
  return E;
}
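
// Illustration: after conversion to the offset representation, the two GEPs
//
//   %p1 = getelementptr i32, ptr %base, i64 1
//   %p2 = getelementptr i8, ptr %base, i64 4
//
// both denote "%base plus 4 bytes" and receive the same value number, even
// though their source element types differ.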

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;
GVNPass::ValueTable &
GVNPass::ValueTable::operator=(const GVNPass::ValueTable &Arg) = default;

/// add - Insert a value into the table with a specified value number.
void GVNPass::ValueTable::add(Value *V, uint32_t Num) {
  ValueNumbering.insert(std::make_pair(V, Num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[Num] = PN;
}

/// Include the incoming memory state into the hash of the expression for the
/// given instruction.  If the incoming memory state is:
/// * LiveOnEntry, add the value number of the entry block,
/// * a MemoryPhi, add the value number of the basic block corresponding to
///   that MemoryPhi,
/// * a MemoryDef, add the value number of the memory setting instruction.
void GVNPass::ValueTable::addMemoryStateToExp(Instruction *I,
                                              Expression &Exp) {
  assert(MSSA && "addMemoryStateToExp should not be called without MemorySSA");
  assert(MSSA->getMemoryAccess(I) && "Instruction does not access memory");
  MemoryAccess *MA = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(I);
  Exp.VarArgs.push_back(lookupOrAdd(MA));
}

uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
  // FIXME: Currently the calls which may access the thread id may be
  // considered as not accessing the memory. But this is problematic for
  // coroutines, since coroutines may resume in a different thread. So we
  // disable the optimization here for correctness, although it may block
  // other correct optimizations.
  if (C->getFunction()->isPresplitCoroutine()) {
    ValueNumbering[C] = NextValueNumber;
    return NextValueNumber++;
  }

  // Do not combine convergent calls since they implicitly depend on the set
  // of threads that is currently executing, and they might be in different
  // basic blocks.
  if (C->isConvergent()) {
    ValueNumbering[C] = NextValueNumber;
    return NextValueNumber++;
  }

  if (AA->doesNotAccessMemory(C)) {
    Expression Exp = createExpr(C);
    uint32_t E = assignExpNewValueNum(Exp).first;
    ValueNumbering[C] = E;
    return E;
  }

  if (MD && AA->onlyReadsMemory(C)) {
    Expression Exp = createExpr(C);
    auto [E, IsValNumNew] = assignExpNewValueNum(Exp);
    if (IsValNumNew) {
      ValueNumbering[C] = E;
      return E;
    }

    MemDepResult LocalDep = MD->getDependency(C);

    if (!LocalDep.isDef() && !LocalDep.isNonLocal()) {
      ValueNumbering[C] = NextValueNumber;
      return NextValueNumber++;
    }

    if (LocalDep.isDef()) {
      // For masked load/store intrinsics, the local_dep may actually be
      // a normal load or store instruction.
      CallInst *LocalDepCall = dyn_cast<CallInst>(LocalDep.getInst());

      if (!LocalDepCall || LocalDepCall->arg_size() != C->arg_size()) {
        ValueNumbering[C] = NextValueNumber;
        return NextValueNumber++;
      }

      for (unsigned I = 0, E = C->arg_size(); I < E; ++I) {
        uint32_t CVN = lookupOrAdd(C->getArgOperand(I));
        uint32_t LocalDepCallVN = lookupOrAdd(LocalDepCall->getArgOperand(I));
        if (CVN != LocalDepCallVN) {
          ValueNumbering[C] = NextValueNumber;
          return NextValueNumber++;
        }
      }

      uint32_t V = lookupOrAdd(LocalDepCall);
      ValueNumbering[C] = V;
      return V;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &Deps =
        MD->getNonLocalCallDependency(C);
    // FIXME: Move the checking logic to MemDep!
    CallInst *CDep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (const NonLocalDepEntry &I : Deps) {
      if (I.getResult().isNonLocal())
        continue;

      // We don't handle non-definitions.  If we already have a call, reject
      // instruction dependencies.
      if (!I.getResult().isDef() || CDep != nullptr) {
        CDep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I.getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I.getBB(), C->getParent())) {
        CDep = NonLocalDepCall;
        continue;
      }

      CDep = nullptr;
      break;
    }

    if (!CDep) {
      ValueNumbering[C] = NextValueNumber;
      return NextValueNumber++;
    }

    if (CDep->arg_size() != C->arg_size()) {
      ValueNumbering[C] = NextValueNumber;
      return NextValueNumber++;
    }
    for (unsigned I = 0, E = C->arg_size(); I < E; ++I) {
      uint32_t CVN = lookupOrAdd(C->getArgOperand(I));
      uint32_t CDepVN = lookupOrAdd(CDep->getArgOperand(I));
      if (CVN != CDepVN) {
        ValueNumbering[C] = NextValueNumber;
        return NextValueNumber++;
      }
    }

    uint32_t V = lookupOrAdd(CDep);
    ValueNumbering[C] = V;
    return V;
  }

  if (MSSA && IsMSSAEnabled && AA->onlyReadsMemory(C)) {
    Expression Exp = createExpr(C);
    addMemoryStateToExp(C, Exp);
    auto [V, _] = assignExpNewValueNum(Exp);
    ValueNumbering[C] = V;
    return V;
  }

  ValueNumbering[C] = NextValueNumber;
  return NextValueNumber++;
}

/// Returns the value number for the specified load or store instruction.
uint32_t GVNPass::ValueTable::computeLoadStoreVN(Instruction *I) {
  if (!MSSA || !IsMSSAEnabled) {
    ValueNumbering[I] = NextValueNumber;
    return NextValueNumber++;
  }

  Expression Exp;
  Exp.Ty = I->getType();
  Exp.Opcode = I->getOpcode();
  for (Use &Op : I->operands())
    Exp.VarArgs.push_back(lookupOrAdd(Op));
  addMemoryStateToExp(I, Exp);

  auto [V, _] = assignExpNewValueNum(Exp);
  ValueNumbering[I] = V;
  return V;
}

/// Returns true if a value number exists for the specified value.
bool GVNPass::ValueTable::exists(Value *V) const {
  return ValueNumbering.contains(V);
}

/// lookupOrAdd - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
  auto VI = ValueNumbering.find(V);
  if (VI != ValueNumbering.end())
    return VI->second;

  auto *I = dyn_cast<Instruction>(V);
  if (!I) {
    ValueNumbering[V] = NextValueNumber;
    if (auto *BB = dyn_cast<BasicBlock>(V))
      NumberingBB[NextValueNumber] = BB;
    return NextValueNumber++;
  }

  Expression Exp;
  switch (I->getOpcode()) {
  case Instruction::Call:
    return lookupOrAddCall(cast<CallInst>(I));
  case Instruction::FNeg:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::PtrToAddr:
  case Instruction::IntToPtr:
  case Instruction::AddrSpaceCast:
  case Instruction::BitCast:
  case Instruction::Select:
  case Instruction::Freeze:
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
  case Instruction::ShuffleVector:
  case Instruction::InsertValue:
    Exp = createExpr(I);
    break;
  case Instruction::GetElementPtr:
    Exp = createGEPExpr(cast<GetElementPtrInst>(I));
    break;
  case Instruction::ExtractValue:
    Exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
    break;
  case Instruction::PHI:
    ValueNumbering[V] = NextValueNumber;
    NumberingPhi[NextValueNumber] = cast<PHINode>(V);
    return NextValueNumber++;
  case Instruction::Load:
  case Instruction::Store:
    return computeLoadStoreVN(I);
  default:
    ValueNumbering[V] = NextValueNumber;
    return NextValueNumber++;
  }

  uint32_t E = assignExpNewValueNum(Exp).first;
  ValueNumbering[V] = E;
  return E;
}

/// Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
  auto VI = ValueNumbering.find(V);
  if (Verify) {
    assert(VI != ValueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != ValueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison, assigning it a new
/// number if it did not have one before.  Useful when we deduced the result
/// of a comparison, but don't immediately have an instruction realizing that
/// comparison to hand.
uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  Expression Exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(Exp).first;
}

/// Remove all entries from the ValueTable.
void GVNPass::ValueTable::clear() {
  ValueNumbering.clear();
  ExpressionNumbering.clear();
  NumberingPhi.clear();
  NumberingBB.clear();
  PhiTranslateTable.clear();
  NextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  NextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVNPass::ValueTable::erase(Value *V) {
  uint32_t Num = ValueNumbering.lookup(V);
  ValueNumbering.erase(V);
  // If V is a PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
  else if (isa<BasicBlock>(V))
    NumberingBB.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
  assert(!ValueNumbering.contains(V) &&
         "Inst still occurs in value numbering map!");
}

//===----------------------------------------------------------------------===//
//                     LeaderMap External Functions
//===----------------------------------------------------------------------===//

/// Push a new Value to the LeaderTable onto the list for its value number.
void GVNPass::LeaderMap::insert(uint32_t N, Value *V, const BasicBlock *BB) {
  LeaderListNode &Curr = NumToLeaders[N];
  if (!Curr.Entry.Val) {
    Curr.Entry.Val = V;
    Curr.Entry.BB = BB;
    return;
  }

  LeaderListNode *Node = TableAllocator.Allocate<LeaderListNode>();
  Node->Entry.Val = V;
  Node->Entry.BB = BB;
  Node->Next = Curr.Next;
  Curr.Next = Node;
}

/// Scan the list of values corresponding to a given value number, and remove
/// the given instruction if encountered.
void GVNPass::LeaderMap::erase(uint32_t N, Instruction *I,
                               const BasicBlock *BB) {
  LeaderListNode *Prev = nullptr;
  LeaderListNode *Curr = &NumToLeaders[N];

  while (Curr && (Curr->Entry.Val != I || Curr->Entry.BB != BB)) {
    Prev = Curr;
    Curr = Curr->Next;
  }

  if (!Curr)
    return;

  if (Prev) {
    Prev->Next = Curr->Next;
  } else {
    if (!Curr->Next) {
      Curr->Entry.Val = nullptr;
      Curr->Entry.BB = nullptr;
    } else {
      LeaderListNode *Next = Curr->Next;
      Curr->Entry.Val = Next->Entry.Val;
      Curr->Entry.BB = Next->Entry.BB;
      Curr->Next = Next->Next;
    }
  }
}

void GVNPass::LeaderMap::verifyRemoved(const Value *V) const {
  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (const auto &I : NumToLeaders) {
    (void)I;
    assert(I.second.Entry.Val != V && "Inst still in value numbering scope!");
    assert(
        std::none_of(leader_iterator(&I.second), leader_iterator(nullptr),
                     [=](const LeaderTableEntry &E) { return E.Val == V; }) &&
        "Inst still in value numbering scope!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

bool GVNPass::isPREEnabled() const {
  return Options.AllowPRE.value_or(GVNEnablePRE);
}

bool GVNPass::isLoadPREEnabled() const {
  return Options.AllowLoadPRE.value_or(GVNEnableLoadPRE);
}

bool GVNPass::isLoadInLoopPREEnabled() const {
  return Options.AllowLoadInLoopPRE.value_or(GVNEnableLoadInLoopPRE);
}

bool GVNPass::isLoadPRESplitBackedgeEnabled() const {
  return Options.AllowLoadPRESplitBackedge.value_or(
      GVNEnableSplitBackedgeInLoadPRE);
}

bool GVNPass::isMemDepEnabled() const {
  return Options.AllowMemDep.value_or(GVNEnableMemDep);
}

bool GVNPass::isMemorySSAEnabled() const {
  return Options.AllowMemorySSA.value_or(GVNEnableMemorySSA);
}

PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto *MemDep =
      isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
  if (isMemorySSAEnabled() && !MSSA) {
    assert(!MemDep &&
           "On-demand computation of MemSSA implies that MemDep is disabled!");
    MSSA = &AM.getResult<MemorySSAAnalysis>(F);
  }
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
                         MSSA ? &MSSA->getMSSA() : nullptr);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}

void GVNPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  if (Options.AllowPRE != std::nullopt)
    OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
  if (Options.AllowLoadPRE != std::nullopt)
    OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
  if (Options.AllowLoadPRESplitBackedge != std::nullopt)
    OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
       << "split-backedge-load-pre;";
  if (Options.AllowMemDep != std::nullopt)
    OS << (*Options.AllowMemDep ? "" : "no-") << "memdep;";
  if (Options.AllowMemorySSA != std::nullopt)
    OS << (*Options.AllowMemorySSA ? "" : "no-") << "memoryssa";
  OS << '>';
}

void GVNPass::salvageAndRemoveInstruction(Instruction *I) {
  salvageDebugInfo(*I);
  removeInstruction(I);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &Map) const {
  errs() << "{\n";
  for (const auto &[Num, Exp] : Map) {
    errs() << Num << "\n";
    Exp->dump();
  }
  errs() << "}\n";
}
#endif

enum class AvailabilityState : char {
  /// We know the block *is not* fully available. This is a fixpoint.
  Unavailable = 0,
  /// We know the block *is* fully available. This is a fixpoint.
  Available = 1,
  /// We do not know whether the block is fully available or not,
  /// but we are currently speculating that it will be.
  /// If it would have turned out that the block was, in fact, not fully
  /// available, this would have been cleaned up into an Unavailable.
  SpeculativelyAvailable = 2,
};

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.
/// This map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we
///      are currently speculating that it will be.
static bool IsValueFullyAvailableInBlock(
    BasicBlock *BB,
    DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
  SmallVector<BasicBlock *, 32> Worklist;
  std::optional<BasicBlock *> UnavailableBB;

  // The number of times we didn't find an entry for a block in a map and
  // optimistically inserted an entry marking block as speculatively available.
  unsigned NumNewNewSpeculativelyAvailableBBs = 0;

#ifndef NDEBUG
  SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
  SmallVector<BasicBlock *, 32> AvailableBBs;
#endif

  Worklist.emplace_back(BB);
  while (!Worklist.empty()) {
    BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
    // Optimistically assume that the block is Speculatively Available and
    // check to see if we already know about this block in one lookup.
    std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
        FullyAvailableBlocks.try_emplace(
            CurrBB, AvailabilityState::SpeculativelyAvailable);
    AvailabilityState &State = IV.first->second;

    // Did the entry already exist for this block?
    if (!IV.second) {
      if (State == AvailabilityState::Unavailable) {
        UnavailableBB = CurrBB;
        break; // Backpropagate unavailability info.
      }

#ifndef NDEBUG
      AvailableBBs.emplace_back(CurrBB);
#endif
      continue; // Don't recurse further, but continue processing worklist.
    }

    // No entry found for block.
    ++NumNewNewSpeculativelyAvailableBBs;
    bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;

    // If we have exhausted our budget, mark this block as unavailable.
    // Also, if this block has no predecessors, the value isn't live-in here.
    if (OutOfBudget || pred_empty(CurrBB)) {
      MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
      State = AvailabilityState::Unavailable;
      UnavailableBB = CurrBB;
      break; // Backpropagate unavailability info.
    }

    // Tentatively consider this block as speculatively available.
#ifndef NDEBUG
    NewSpeculativelyAvailableBBs.insert(CurrBB);
#endif
    // And further recurse into block's predecessors, in depth-first order!
    Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
  }

#if LLVM_ENABLE_STATS
  IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
      NumNewNewSpeculativelyAvailableBBs);
#endif

  // If the block isn't marked as fixpoint yet
  // (the Unavailable and Available states are fixpoints).
  auto MarkAsFixpointAndEnqueueSuccessors =
      [&](BasicBlock *BB, AvailabilityState FixpointState) {
        auto It = FullyAvailableBlocks.find(BB);
        if (It == FullyAvailableBlocks.end())
          return; // Never queried this block, leave as-is.
        switch (AvailabilityState &State = It->second; State) {
        case AvailabilityState::Unavailable:
        case AvailabilityState::Available:
          return; // Don't backpropagate further, continue processing worklist.
        case AvailabilityState::SpeculativelyAvailable: // Fix it!
          State = FixpointState;
#ifndef NDEBUG
          assert(NewSpeculativelyAvailableBBs.erase(BB) &&
                 "Found a speculatively available successor leftover?");
#endif
          // Queue successors for further processing.
          Worklist.append(succ_begin(BB), succ_end(BB));
          return;
        }
      };

  if (UnavailableBB) {
    // Okay, we have encountered an unavailable block.
    // Mark speculatively available blocks reachable from UnavailableBB as
    // unavailable as well. Paths are terminated when they reach blocks not in
    // FullyAvailableBlocks or they are not marked as speculatively available.
    Worklist.clear();
    Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
    while (!Worklist.empty())
      MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                         AvailabilityState::Unavailable);
  }

#ifndef NDEBUG
  Worklist.clear();
  for (BasicBlock *AvailableBB : AvailableBBs)
    Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
  while (!Worklist.empty())
    MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                       AvailabilityState::Available);

  assert(NewSpeculativelyAvailableBBs.empty() &&
         "Must have fixed all the new speculatively available blocks.");
#endif

  return !UnavailableBB;
}
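
// Illustration: in the diamond CFG below, a value recorded Available in both
// BB2 and BB3 is fully available in BB4 -- the speculative walk visits BB4's
// predecessors, finds both fixed at Available, and fixes BB4 to Available.
// If the value were missing in BB3, the walk would continue into BB1 and its
// predecessors, eventually hit a block with no predecessors, and the whole
// speculated chain would be fixed to Unavailable instead.
//
//        BB1
//       /   \
//     BB2   BB3
//       \   /
//        BB4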

/// If the specified OldValue exists in ValuesPerBlock, replace its value with
/// NewValue.
static void replaceValuesPerBlockEntry(
    SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock, Value *OldValue,
    Value *NewValue) {
  for (AvailableValueInBlock &V : ValuesPerBlock) {
    if (V.AV.Val == OldValue)
      V.AV.Val = NewValue;
    if (V.AV.isSelectValue()) {
      if (V.AV.V1 == OldValue)
        V.AV.V1 = NewValue;
      if (V.AV.V2 == OldValue)
        V.AV.V2 = NewValue;
    }
  }
}

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate Load.  This returns the value
/// that should be used at Load's definition site.
static Value *
ConstructSSAForLoadSet(LoadInst *Load,
                       SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                       GVNPass &GVN) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      GVN.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               Load->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(Load);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode *, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(Load->getType(), Load->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (AV.AV.isUndefValue())
      continue;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    // If the value is the load that we will be eliminating, and the block it's
    // available in is the block that the load is in, then don't add it as
    // SSAUpdater will resolve the value to the relevant phi which may let it
    // avoid phi construction entirely if there's actually only one value.
    if (BB == Load->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
}
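
// Illustration: if the loaded value is available as %v0 in %BB2 and as %v1 in
// %BB3, and the load lives in their common successor %BB4, SSAUpdater inserts
//
//   %v = phi i32 [ %v0, %BB2 ], [ %v1, %BB3 ]
//
// in %BB4 and that PHI replaces the load.  In the single, properly
// dominating value case no PHI is needed and the value is returned directly.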

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
                                                Instruction *InsertPt) const {
  Value *Res;
  Type *LoadTy = Load->getType();
  const DataLayout &DL = Load->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getValueForLoad(Res, Offset, LoadTy, InsertPt, Load->getFunction());

      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
                        << "  " << *getSimpleValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *CoercedLoad = getCoercedLoadValue();
    if (CoercedLoad->getType() == LoadTy && Offset == 0) {
      Res = CoercedLoad;
      combineMetadataForCSE(CoercedLoad, Load, false);
    } else {
      Res = getValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt,
                            Load->getFunction());
      // We are adding a new user for this load, for which the original
      // metadata may not hold. Additionally, the new load may have a different
      // size and type, so their metadata cannot be combined in any
      // straightforward way.
      // Drop all metadata that is not known to cause immediate UB on
      // violation, unless the load has !noundef, in which case all metadata
      // violations will be promoted to UB.
      if (!CoercedLoad->hasMetadata(LLVMContext::MD_noundef))
        CoercedLoad->dropUnknownNonDebugMetadata(
            {LLVMContext::MD_dereferenceable,
             LLVMContext::MD_dereferenceable_or_null,
             LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                        << "  " << *getCoercedLoadValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                      << "  " << *getMemIntrinValue() << '\n'
                      << *Res << '\n'
                      << "\n\n\n");
  } else if (isSelectValue()) {
    // Introduce a new value select for a load from an eligible pointer select.
    SelectInst *Sel = getSelectValue();
    assert(V1 && V2 && "both value operands of the select must be present");
    Res =
        SelectInst::Create(Sel->getCondition(), V1, V2, "", Sel->getIterator());
    // We use the DebugLoc from the original load here, as this instruction
    // materializes the value that would previously have been loaded.
    cast<SelectInst>(Res)->setDebugLoc(Load->getDebugLoc());
  } else {
    llvm_unreachable("Should not materialize value from dead block");
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// Assuming To can be reached from both From and Between, does Between lie on
/// every path from From to To?
static bool liesBetween(const Instruction *From, Instruction *Between,
                        const Instruction *To, const DominatorTree *DT) {
  if (From->getParent() == Between->getParent())
    return DT->dominates(From, Between);
  SmallSet<BasicBlock *, 1> Exclusion;
  Exclusion.insert(Between->getParent());
  return !isPotentiallyReachable(From, To, &Exclusion, DT);
}

static const Instruction *findMayClobberedPtrAccess(LoadInst *Load,
                                                    const DominatorTree *DT) {
  Value *PtrOp = Load->getPointerOperand();
  if (!PtrOp->hasUseList())
    return nullptr;

  Instruction *OtherAccess = nullptr;

  for (auto *U : PtrOp->users()) {
    if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
      auto *I = cast<Instruction>(U);
      if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {
        // Use the most immediately dominating value.
        if (OtherAccess) {
          if (DT->dominates(OtherAccess, I))
            OtherAccess = I;
          else
            assert(U == OtherAccess || DT->dominates(I, OtherAccess));
        } else
          OtherAccess = I;
      }
    }
  }

  if (OtherAccess)
    return OtherAccess;

  // There is no dominating use, check if we can find a closest non-dominating
  // use that lies between any other potentially available use and Load.
  for (auto *U : PtrOp->users()) {
    if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
      auto *I = cast<Instruction>(U);
      if (I->getFunction() == Load->getFunction() &&
          isPotentiallyReachable(I, Load, nullptr, DT)) {
        if (OtherAccess) {
          if (liesBetween(OtherAccess, I, Load, DT)) {
            OtherAccess = I;
          } else if (!liesBetween(I, OtherAccess, Load, DT)) {
            // These uses are both partially available at Load were it not for
            // the clobber, but neither lies strictly after the other.
            OtherAccess = nullptr;
            break;
          } // else: keep current OtherAccess since it lies between U and Load.
        } else {
          OtherAccess = I;
        }
      }
    }
  }

  return OtherAccess;
}

/// Try to locate the three instruction involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
                                   const DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
  R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
    << setExtraArgs();

  const Instruction *OtherAccess = findMayClobberedPtrAccess(Load, DT);
  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

// Find non-clobbered value for Loc memory location in extended basic block
// (chain of basic blocks with single predecessors) starting From instruction.
static Value *findDominatingValue(const MemoryLocation &Loc, Type *LoadTy,
                                  Instruction *From, AAResults *AA) {
  uint32_t NumVisitedInsts = 0;
  BasicBlock *FromBB = From->getParent();
  BatchAAResults BatchAA(*AA);
  for (BasicBlock *BB = FromBB; BB; BB = BB->getSinglePredecessor())
    for (auto *Inst = BB == FromBB ? From : BB->getTerminator();
         Inst != nullptr; Inst = Inst->getPrevNode()) {
      // Stop the search if limit is reached.
      if (++NumVisitedInsts > MaxNumVisitedInsts)
        return nullptr;
      if (isModSet(BatchAA.getModRefInfo(Inst, Loc)))
        return nullptr;
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)
          return LI;
    }
  return nullptr;
}

std::optional<AvailableValue>
GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
                                 Value *Address) {
  assert(Load->isUnordered() && "rules below are incorrect for ordered access");
  assert(DepInfo.isLocal() && "expected a local dependence");

  Instruction *DepInst = DepInfo.getInst();
  const DataLayout &DL = Load->getDataLayout();
  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
      // Can't forward from non-atomic to atomic without violating memory
      // model.
      if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
        if (Offset != -1)
          return AvailableValue::get(DepSI->getValueOperand(), Offset);
      }
    }

    // Check to see if we have something like this:
    //    load i32* P
    //    load i8* (P+1)
    // if we have this, replace the later with an extraction from the former.
    if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory
      // model.
      if (DepLoad != Load && Address &&
          Load->isAtomic() <= DepLoad->isAtomic()) {
        Type *LoadType = Load->getType();
        int Offset = -1;

        // If MD reported clobber, check it was nested.
        if (DepInfo.isClobber() &&
            canCoerceMustAliasedValueToLoad(DepLoad, LoadType,
                                            DepLoad->getFunction())) {
          const auto ClobberOff = MD->getClobberOffset(DepLoad);
          // GVN has no deal with a negative offset.
          Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
                       ? -1
                       : *ClobberOff;
        }
        if (Offset == -1)
          Offset =
              analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
        if (Offset != -1)
          return AvailableValue::getLoad(DepLoad, Offset);
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
      if (Address && !Load->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1)
          return AvailableValue::getMI(DepMI, Offset);
      }
    }

    // Nothing known about this clobber, have to be conservative.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
        dbgs() << " is clobbered by " << *DepInst << '\n';);
    if (ORE->allowExtraAnalysis(DEBUG_TYPE))
      reportMayClobberedLoad(Load, DepInfo, DT, ORE);

    return std::nullopt;
  }
  assert(DepInfo.isDef() && "follows from above");

  // Loading the alloca -> undef.
  // Loading immediately after lifetime begin -> undef.
  if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst))
    return AvailableValue::get(UndefValue::get(Load->getType()));

  if (Constant *InitVal =
          getInitialValueOfAllocation(DepInst, TLI, Load->getType()))
    return AvailableValue::get(InitVal);

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is convertable to
    // the loaded value, we can reuse it.
    if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
                                         S->getFunction()))
      return std::nullopt;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (S->isAtomic() < Load->isAtomic())
      return std::nullopt;

    return AvailableValue::get(S->getValueOperand());
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger or equal to the loaded value, we can
    // reuse it.
    if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(),
                                         LD->getFunction()))
      return std::nullopt;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < Load->isAtomic())
      return std::nullopt;

    return AvailableValue::getLoad(LD);
  }

  // Check if load with Addr dependent from select can be converted to select
  // between load values. There must be no instructions between the found
  // loads and DepInst that may clobber the loads.
  if (SelectInst *Sel = dyn_cast<SelectInst>(DepInst)) {
    assert(Sel->getType() == Load->getPointerOperandType());
    auto Loc = MemoryLocation::get(Load);
    Value *V1 =
        findDominatingValue(Loc.getWithNewPtr(Sel->getTrueValue()),
                            Load->getType(), DepInst, getAliasAnalysis());
    if (!V1)
      return std::nullopt;
    Value *V2 =
        findDominatingValue(Loc.getWithNewPtr(Sel->getFalseValue()),
                            Load->getType(), DepInst, getAliasAnalysis());
    if (!V2)
      return std::nullopt;
    return AvailableValue::getSelect(Sel, V1, V2);
  }

  // Unknown def - must be conservative.
  LLVM_DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
      dbgs() << " has unknown def " << *DepInst << '\n';);
  return std::nullopt;
}

void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
                                      AvailValInBlkVect &ValuesPerBlock,
                                      UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  for (const auto &Dep : Deps) {
    BasicBlock *DepBB = Dep.getBB();
    MemDepResult DepInfo = Dep.getResult();

    if (DeadBlocks.count(DepBB)) {
      // Dead dependent mem-op disguise as a load evaluating the same value
      // as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isLocal()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs.  Make sure
    // to consider the right address.
    if (auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
      // subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(
          AvailableValueInBlock::get(DepBB, std::move(*AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

/// Given the following code, v1 is partially available on some edges, but not
/// available on the edge from PredBB. This function tries to find if there is
/// another identical load in the other successor of PredBB.
///
///      v0 = load %addr
///      br %LoadBB
///
///   LoadBB:
///      v1 = load %addr
///      ...
///
///   PredBB:
///      ...
///      br %cond, label %LoadBB, label %SuccBB
///
///   SuccBB:
///      v2 = load %addr
///      ...
///
LoadInst *GVNPass::findLoadToHoistIntoPred(BasicBlock *Pred, BasicBlock *LoadBB,
                                           LoadInst *Load) {
  // For simplicity we handle a Pred has 2 successors only.
  auto *Term = Pred->getTerminator();
  if (Term->getNumSuccessors() != 2 || Term->isSpecialTerminator())
    return nullptr;
  auto *SuccBB = Term->getSuccessor(0);
  if (SuccBB == LoadBB)
    SuccBB = Term->getSuccessor(1);
  if (!SuccBB->getSinglePredecessor())
    return nullptr;

  unsigned int NumInsts = MaxNumInsnsPerBlock;
  for (Instruction &Inst : *SuccBB) {
    if (Inst.isDebugOrPseudoInst())
      continue;
    if (--NumInsts == 0)
      return nullptr;

    if (!Inst.isIdenticalTo(Load))
      continue;

    MemDepResult Dep = MD->getDependency(&Inst);
    // If an identical load doesn't depend on any local instructions, it can
    // be safely moved to PredBB.
    // Also check for the implicit control flow instructions. See the comments
    // in PerformLoadPRE for details.
    if (Dep.isNonLocal() && !ICF->isDominatedByICFIFromSameBlock(&Inst))
      return cast<LoadInst>(&Inst);

    // Otherwise there is something in the same BB that clobbers the memory,
    // we can't move this and later load to PredBB.
    return nullptr;
  }

  return nullptr;
}

void GVNPass::eliminatePartiallyRedundantLoad(
    LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
    MapVector<BasicBlock *, Value *> &AvailableLoads,
    MapVector<BasicBlock *, LoadInst *> *CriticalEdgePredAndLoad) {
  for (const auto &AvailableLoad : AvailableLoads) {
    BasicBlock *UnavailableBlock = AvailableLoad.first;
    Value *LoadPtr = AvailableLoad.second;

    auto *NewLoad = new LoadInst(
        Load->getType(), LoadPtr, Load->getName() + ".pre", Load->isVolatile(),
        Load->getAlign(), Load->getOrdering(), Load->getSyncScopeID(),
        UnavailableBlock->getTerminator()->getIterator());
    NewLoad->setDebugLoc(Load->getDebugLoc());
    if (MSSAU) {
      auto *NewAccess = MSSAU->createMemoryAccessInBB(
          NewLoad, nullptr, NewLoad->getParent(), MemorySSA::BeforeTerminator);
      if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
        MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      else
        MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
    }

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags = Load->getAAMetadata();
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
    if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
      if (LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
        NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(
        AvailableValueInBlock::get(UnavailableBlock, NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');

    // For PredBB in CriticalEdgePredAndLoad we need to replace the uses of old
    // load instruction with the new created load instruction.
    if (CriticalEdgePredAndLoad) {
      auto It = CriticalEdgePredAndLoad->find(UnavailableBlock);
      if (It != CriticalEdgePredAndLoad->end()) {
        ++NumPRELoadMoved2CEPred;
        ICF->insertInstructionTo(NewLoad, UnavailableBlock);
        LoadInst *OldLoad = It->second;
        combineMetadataForCSE(NewLoad, OldLoad, false);
        OldLoad->replaceAllUsesWith(NewLoad);
        replaceValuesPerBlockEntry(ValuesPerBlock, OldLoad, NewLoad);
        if (uint32_t ValNo = VN.lookup(OldLoad, false))
          LeaderTable.erase(ValNo, OldLoad, OldLoad->getParent());
        removeInstruction(OldLoad);
      }
    }
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
  // ConstructSSAForLoadSet is responsible for combining metadata.
  ICF->removeUsersOf(Load);
  Load->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(Load);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(Load->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
           << "load eliminated by PRE";
  });
  salvageAndRemoveInstruction(Load);
}

bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
                             UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Lets think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
  SmallPtrSet<BasicBlock *, 4> Blockers(llvm::from_range, UnavailableBlocks);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = Load->getParent();
  BasicBlock *TmpBB = LoadBB;

  // Check that there is no implicit control flow instructions above our load
  // in its block. If there is an instruction that doesn't always pass the
  // execution to the following instruction, then moving through it may become
  // invalid.
  bool MustEnsureSafetyOfSpeculativeExecution =
      ICF->isDominatedByICFIFromSameBlock(Load);

  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated.  Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;

    // Check that there is no implicit control flow in a block above.
    MustEnsureSafetyOfSpeculativeExecution =
        MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;

  // The edge from Pred to LoadBB is a critical edge which will be split.
  SmallVector<BasicBlock *, 4> CriticalEdgePredSplit;
  // The edge from Pred to LoadBB is a critical edge, another successor of Pred
  // contains a load that can be moved to Pred. This data structure maps the
  // Pred to the movable load.
  MapVector<BasicBlock *, LoadInst *> CriticalEdgePredAndLoad;
  for (BasicBlock *Pred : predecessors(LoadBB)) {
    // If any predecessor block is an EH pad that does not allow non-PHI
    // instructions before the terminator, we can't PRE the load.
    if (Pred->getTerminator()->isEHPad()) {
      LLVM_DEBUG(
          dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
                 << Pred->getName() << "': " << *Load << '\n');
      return false;
    }

    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
                   << Pred->getName() << "': " << *Load << '\n');
        return false;
      }

      if (LoadBB->isEHPad()) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
                   << Pred->getName() << "': " << *Load << '\n');
        return false;
      }

      // Do not split backedge as it will break the canonical loop form.
      if (!isLoadPRESplitBackedgeEnabled())
        if (DT->dominates(LoadBB, Pred)) {
          LLVM_DEBUG(
              dbgs()
              << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
              << Pred->getName() << "': " << *Load << '\n');
          return false;
        }

      if (LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
        CriticalEdgePredAndLoad[Pred] = LI;
      else
        CriticalEdgePredSplit.push_back(Pred);
    } else {
      // Only add the predecessors that will not be split for now.
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumInsertPreds = PredLoads.size() + CriticalEdgePredSplit.size();
  unsigned NumUnavailablePreds = NumInsertPreds +
      CriticalEdgePredAndLoad.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");
  (void)NumUnavailablePreds;

  // If we need to insert new load in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available Load and insert a new load into
  // that one block.
  if (NumInsertPreds > 1)
    return false;

  // Now we know where we will insert load. We must ensure that it is safe
  // to speculatively execute the load at that points.
  if (MustEnsureSafetyOfSpeculativeExecution) {
    if (CriticalEdgePredSplit.size())
      if (!isSafeToSpeculativelyExecute(Load, &*LoadBB->getFirstNonPHIIt(), AC,
                                        DT))
        return false;
    for (auto &PL : PredLoads)
      if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), AC,
                                        DT))
        return false;
    for (auto &CEP : CriticalEdgePredAndLoad)
      if (!isSafeToSpeculativelyExecute(Load, CEP.first->getTerminator(), AC,
                                        DT))
        return false;
  }

  // Split critical edges, and update the unavailable predecessors accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePredSplit) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                      << LoadBB->getName() << '\n');
  }

  for (auto &CEP : CriticalEdgePredAndLoad)
    PredLoads[CEP.first] = nullptr;

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  const DataLayout &DL = Load->getDataLayout();
  SmallVector<Instruction*, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get its value in the predecessor if necessary.
    // The returned pointer (if non-null) is guaranteed to dominate
    // UnavailablePred.  We do the translation for each edge we skipped by
    // going from Load's block to LoadBB, otherwise we might miss pieces
    // needing translation.
    Value *LoadPtr = Load->getPointerOperand();
    BasicBlock *Cur = Load->getParent();
    while (Cur != LoadBB) {
      PHITransAddr Address(LoadPtr, DL, AC);
      LoadPtr = Address.translateWithInsertion(Cur, Cur->getSinglePredecessor(),
                                               *DT, NewInsts);
      if (!LoadPtr) {
        CanDoPRE = false;
        break;
      }
      Cur = Cur->getSinglePredecessor();
    }

    if (LoadPtr) {
      PHITransAddr Address(LoadPtr, DL, AC);
      LoadPtr = Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
                                               NewInsts);
    }
    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (!LoadPtr) {
      LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                        << *Load->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      // Erase instructions generated by the failed PHI translation before
      // trying to number them. PHI translation might insert instructions
      // in basic blocks other than the current one, and we delete them
      // directly, as markInstructionForDeletion only allows removing from the
      // current basic block.
      NewInsts.pop_back_val()->eraseFromParent();
    }
    // HINT: Don't revert the edge-splitting as following transformation may
    // also need to split these critical edges.
    return !CriticalEdgePredSplit.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors.
  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
  LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
                                           << " INSTS: " << *NewInsts.back()
                                           << '\n');

  // Assign value numbers to the new instructions.
  for (Instruction *I : NewInsts) {
    // Instructions that have been inserted in predecessor(s) to materialize
    // the load address do not retain their original debug locations. Doing
    // so could lead to confusing (but correct) source attributions.
    I->updateLocationAfterHoist();

    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookupOrAdd(I);
  }

  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
                                  &CriticalEdgePredAndLoad);
  ++NumPRELoad;
  return true;
}
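
// Illustration: given
//
//   entry:
//     br i1 %c, label %then, label %join
//   then:
//     store i32 0, ptr %p
//     br label %join
//   join:
//     %v = load i32, ptr %p
//
// the loaded value is available on the %then path (as the stored 0) but not
// on the %entry -> %join path. Load PRE inserts "%v.pre = load i32, ptr %p"
// on that path (splitting the critical edge if necessary) and replaces %v
// with "phi i32 [ 0, %then ], [ %v.pre, ... ]", so no path loads twice.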

bool GVNPass::performLoopLoadPRE(LoadInst *Load,
                                 AvailValInBlkVect &ValuesPerBlock,
                                 UnavailBlkVect &UnavailableBlocks) {
  const Loop *L = LI->getLoopFor(Load->getParent());
  // TODO: Generalize to other loop blocks that dominate the latch.
  if (!L || L->getHeader() != Load->getParent())
    return false;

  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *Latch = L->getLoopLatch();
  if (!Preheader || !Latch)
    return false;

  Value *LoadPtr = Load->getPointerOperand();
  // Must be available everywhere in the loop.
  if (!L->isLoopInvariant(LoadPtr))
    return false;

  // We plan to hoist the load to preheader without introducing a new fault.
  // In order to do it, we need to prove that we cannot side-exit the loop
  // once loop header is first entered before execution of the load.
  if (ICF->isDominatedByICFIFromSameBlock(Load))
    return false;

  BasicBlock *LoopBlock = nullptr;
  for (auto *Blocker : UnavailableBlocks) {
    // Blockers from outside the loop are handled in preheader.
    if (!L->contains(Blocker))
      continue;

    // Only allow one loop block. Loop header is not less frequently executed
    // than each loop block, and likely it is much more frequently executed.
    // But in case of multiple loop blocks, we need extra information (such as
    // block frequency info) to understand whether it is profitable to PRE
    // into multiple loop blocks.
    if (LoopBlock)
      return false;

    // Do not sink into inner loops. This may be non-profitable.
    if (L != LI->getLoopFor(Blocker))
      return false;

    // Blocks that dominate the latch execute on every single iteration, maybe
    // except the last one. So PREing into these blocks doesn't make much
    // sense in most cases. But the blocks that do not necessarily execute on
    // each iteration are sometimes much colder than the header, and this is
    // when PRE is potentially profitable.
    if (DT->dominates(Blocker, Latch))
      return false;

    // Make sure that the terminator itself doesn't clobber.
    if (Blocker->getTerminator()->mayWriteToMemory())
      return false;

    LoopBlock = Blocker;
  }

  if (!LoopBlock)
    return false;

  // Make sure the memory at this pointer cannot be freed, therefore we can
  // safely reload from it after clobber.
  if (LoadPtr->canBeFreed())
    return false;

  // TODO: Support critical edge splitting if blocker has more than 1 successor.
  MapVector<BasicBlock *, Value *> AvailableLoads;
  AvailableLoads[LoopBlock] = LoadPtr;
  AvailableLoads[Preheader] = LoadPtr;

  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,
                                  /*CriticalEdgePredAndLoad*/ nullptr);
  ++NumPRELoopLoad;
  return true;
}
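
// Illustration: for a loop-invariant pointer %p in
//
//   loop:
//     %v = load i32, ptr %p
//     ...
//     br i1 %cold, label %side, label %latch
//   side:                       ; cold block that may clobber *%p
//     call void @f()
//     br label %latch
//
// the load is made available in the preheader and re-loaded only in the cold
// clobbering block, so the hot loop header no longer executes the load on
// every iteration.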

static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
                           OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
           << "load of type " << NV("Type", Load->getType()) << " eliminated"
           << setExtraArgs() << " in favor of "
           << NV("InfavorOfValue", AvailableValue);
  });
}

/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVNPass::processNonLocalLoad(LoadInst *Load) {
  // Non-local speculations are not allowed under asan.
  if (Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeAddress) ||
      Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeHWAddress))
    return false;

  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  MD->getNonLocalPointerDependency(Load, Deps);

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > MaxNumDeps)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
               dbgs() << " has unknown dependencies\n";);
    return false;
  }

  bool Changed = false;
  // If this load follows a GEP, see if we can PRE the indices before analyzing.
  if (GetElementPtrInst *GEP =
          dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
    for (Use &U : GEP->indices())
      if (Instruction *I = dyn_cast<Instruction>(U.get()))
        Changed |= performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return Changed;

  // Step 3: Eliminate fully redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
    // ConstructSSAForLoadSet is responsible for combining metadata.
    ICF->removeUsersOf(Load);
    Load->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(Load);
    if (Instruction *I = dyn_cast<Instruction>(V))
      // If instruction I has debug info, then we should not update it.
      // Also, if I has a null DebugLoc, then it is still potentially incorrect
      // to propagate Load's DebugLoc because Load may not post-dominate I.
      if (Load->getDebugLoc() && Load->getParent() == I->getParent())
        I->setDebugLoc(Load->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);
    ++NumGVNLoad;
    reportLoadElim(Load, V, ORE);
    salvageAndRemoveInstruction(Load);
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!isPREEnabled() || !isLoadPREEnabled())
    return Changed;
  if (!isLoadInLoopPREEnabled() && LI->getLoopFor(Load->getParent()))
    return Changed;

  if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
      PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
    return true;

  return Changed;
}

bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
  Value *V = IntrinsicI->getArgOperand(0);

  if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
    if (Cond->isZero()) {
      Type *Int8Ty = Type::getInt8Ty(V->getContext());
      Type *PtrTy = PointerType::get(V->getContext(), 0);
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  FIXME: We could insert unreachable
      // instruction directly because we can modify the CFG.
      auto *NewS =
          new StoreInst(PoisonValue::get(Int8Ty), Constant::getNullValue(PtrTy),
                        IntrinsicI->getIterator());
      if (MSSAU) {
        const MemoryUseOrDef *FirstNonDom = nullptr;
        const auto *AL =
            MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());

        // If there are accesses in the current basic block, find the first
        // one that does not come before NewS. The new memory access is
        // inserted after the found access or before the terminator if no such
        // access is found.
        if (AL) {
          for (const auto &Acc : *AL) {
            if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
              if (!Current->getMemoryInst()->comesBefore(NewS)) {
                FirstNonDom = Current;
                break;
              }
          }
        }

        auto *NewDef =
            FirstNonDom ? MSSAU->createMemoryAccessBefore(
                              NewS, nullptr,
                              const_cast<MemoryUseOrDef *>(FirstNonDom))
                        : MSSAU->createMemoryAccessInBB(
                              NewS, nullptr,
                              NewS->getParent(), MemorySSA::BeforeTerminator);

        MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
      }
    }
    if (isAssumeWithEmptyBundle(*IntrinsicI)) {
      salvageAndRemoveInstruction(IntrinsicI);
      return true;
    }
    return false;
  }

  if (isa<Constant>(V)) {
    // If it's not false, and constant, it must evaluate to true. This means
    // our assume is assume(true), and thus, pointless, and we don't want to
    // delete it.
    return false;
  }

  Constant *True = ConstantInt::getTrue(V->getContext());
  return propagateEquality(V, True, IntrinsicI);
}

static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
}

/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVNPass::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  // This code hasn't been audited for ordered or volatile memory access.
  if (!L->isUnordered())
    return false;

  if (L->getType()->isTokenLikeTy())
    return false;

  if (L->use_empty()) {
    salvageAndRemoveInstruction(L);
    return true;
  }

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  // Only handle the local case below.
  if (!Dep.isLocal()) {
    // This might be a NonFuncLocal or an Unknown.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; L->printAsOperand(dbgs());
        dbgs() << " has unknown dependence\n";);
    return false;
  }

  auto AV = AnalyzeLoadAvailability(L, Dep, L->getPointerOperand());
  if (!AV)
    return false;

  Value *AvailableValue = AV->MaterializeAdjustedValue(L, L);

  // MaterializeAdjustedValue is responsible for combining metadata.
  ICF->removeUsersOf(L);
  L->replaceAllUsesWith(AvailableValue);
  if (MSSAU)
    MSSAU->removeMemoryAccess(L);
  ++NumGVNLoad;
  reportLoadElim(L, AvailableValue, ORE);
  salvageAndRemoveInstruction(L);
  // Tell MDA to reexamine the reused pointer since we might have more
  // information after forwarding it.
  if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(AvailableValue);
  return true;
}

// Attempt to process masked loads which have loaded from
// masked stores with the same mask.
bool GVNPass::processMaskedLoad(IntrinsicInst *I) {
  if (!MD)
    return false;
  MemDepResult Dep = MD->getDependency(I);
  Instruction *DepInst = Dep.getInst();
  if (!DepInst || !Dep.isLocal() || !Dep.isDef())
    return false;

  Value *Mask = I->getOperand(1);
  Value *Passthrough = I->getOperand(2);
  Value *StoreVal;
  if (!match(DepInst, m_Intrinsic<Intrinsic::masked_store>(
                          m_Value(StoreVal), m_Value(), m_Specific(Mask))) ||
      StoreVal->getType() != I->getType())
    return false;

  // Remove the load but generate a select for the passthrough.
  Value *OpToForward = SelectInst::Create(Mask, StoreVal, Passthrough, "",
                                          I->getIterator());

  ICF->removeUsersOf(I);
  I->replaceAllUsesWith(OpToForward);
  salvageAndRemoveInstruction(I);
  ++NumGVNLoad;
  return true;
}

/// Return a pair the first field showing the value number of \p Exp and the
/// second field showing whether it is a value number newly created.
std::pair<uint32_t, bool>
GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &E = ExpressionNumbering[Exp];
  bool CreateNewValNum = !E;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < NextValueNumber + 1)
      ExprIdx.resize(NextValueNumber * 2);
    E = NextValueNumber;
    ExprIdx[NextValueNumber++] = NextExprNumber++;
  }
  return {E, CreateNewValNum};
}

/// Return whether all the values related with the same expression
/// are all valid in BB.
bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
                                         GVNPass &GVN) {
  return all_of(
      GVN.LeaderTable.getLeaders(Num),
      [=](const LeaderMap::LeaderTableEntry &L) { return L.BB == BB; });
}

/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           uint32_t Num, GVNPass &GVN) {
  auto FindRes = PhiTranslateTable.find({Num, Pred});
  if (FindRes != PhiTranslateTable.end())
    return FindRes->second;
  uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, GVN);
  PhiTranslateTable.insert({{Num, Pred}, NewNum});
  return NewNum;
}

// Return true if the value number \p Num and NewNum have equal value.
// Return false if the result is unknown.
bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
                                           const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           GVNPass &GVN) {
  CallInst *Call = nullptr;
  auto Leaders = GVN.LeaderTable.getLeaders(Num);
  for (const auto &Entry : Leaders) {
    Call = dyn_cast<CallInst>(Entry.Val);
    if (Call && Call->getParent() == PhiBlock)
      break;
  }

  if (AA->doesNotAccessMemory(Call))
    return true;

  if (!MD || !AA->onlyReadsMemory(Call))
    return false;

  MemDepResult LocalDep = MD->getDependency(Call);
  if (!LocalDep.isNonLocal())
    return false;

  const MemoryDependenceResults::NonLocalDepInfo &Deps =
      MD->getNonLocalCallDependency(Call);

  // Check to see if the Call has no function local clobber.
  for (const NonLocalDepEntry &D : Deps) {
    if (D.getResult().isNonFuncLocal())
      return true;
  }
  return false;
}

/// Translate value number \p Num using phis, so that it has the values of
/// the phis in BB.
uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
                                               const BasicBlock *PhiBlock,
                                               uint32_t Num, GVNPass &GVN) {
  // See if we can refine the value number by looking at the PN incoming value
  // for the given predecessor.
  if (PHINode *PN = NumberingPhi[Num]) {
    if (PN->getParent() != PhiBlock)
      return Num;
    for (unsigned I = 0; I != PN->getNumIncomingValues(); ++I) {
      if (PN->getIncomingBlock(I) != Pred)
        continue;
      if (uint32_t TransVal = lookup(PN->getIncomingValue(I), false))
        return TransVal;
    }
    return Num;
  }

  if (BasicBlock *BB = NumberingBB[Num]) {
    assert(MSSA && "NumberingBB is non-empty only when using MemorySSA");
    // Value numbers of basic blocks are used to represent memory state in
    // load/store instructions and read-only function calls when said state is
    // set by a MemoryPhi.
    if (BB != PhiBlock)
      return Num;
    MemoryPhi *MPhi = MSSA->getMemoryAccess(BB);
    for (unsigned i = 0, N = MPhi->getNumIncomingValues(); i != N; ++i) {
      if (MPhi->getIncomingBlock(i) != Pred)
        continue;
      MemoryAccess *MA = MPhi->getIncomingValue(i);
      if (auto *PredPhi = dyn_cast<MemoryPhi>(MA))
        return lookupOrAdd(PredPhi->getBlock());
      if (MSSA->isLiveOnEntryDef(MA))
        return lookupOrAdd(&BB->getParent()->getEntryBlock());
      return lookupOrAdd(cast<MemoryUseOrDef>(MA)->getMemoryInst());
    }
    llvm_unreachable(
        "CFG/MemorySSA mismatch: predecessor not found among incoming blocks");
  }

  // If there is any value related with Num is defined in a BB other than
  // PhiBlock, it cannot depend on a phi in PhiBlock without going through
  // a backedge, so no need to translate.
  if (!areAllValsInBB(Num, PhiBlock, GVN))
    return Num;

  if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
    return Num;
  Expression Exp = Expressions[ExprIdx[Num]];

  for (unsigned I = 0; I < Exp.VarArgs.size(); I++) {
    // For InsertValue and ExtractValue, some varargs are index numbers
    // instead of value numbers. Those index numbers should not be
    // translated.
    if ((I > 1 && Exp.Opcode == Instruction::InsertValue) ||
        (I > 0 && Exp.Opcode == Instruction::ExtractValue) ||
        (I > 1 && Exp.Opcode == Instruction::ShuffleVector))
      continue;
    Exp.VarArgs[I] = phiTranslate(Pred, PhiBlock, Exp.VarArgs[I], GVN);
  }

  if (Exp.Commutative) {
    assert(Exp.VarArgs.size() >= 2 && "Unsupported commutative instruction!");
    if (Exp.VarArgs[0] > Exp.VarArgs[1]) {
      std::swap(Exp.VarArgs[0], Exp.VarArgs[1]);
      uint32_t Opcode = Exp.Opcode >> 8;
      if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
        Exp.Opcode = (Opcode << 8) |
                     CmpInst::getSwappedPredicate(
                         static_cast<CmpInst::Predicate>(Exp.Opcode & 255));
    }
  }

  if (uint32_t NewNum = ExpressionNumbering[Exp]) {
    if (Exp.Opcode == Instruction::Call && NewNum != Num)
      return areCallValsEqual(Num, NewNum, Pred, PhiBlock, GVN) ? NewNum : Num;
    return NewNum;
  }
  return Num;
}
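
// Illustration: with
//
//   bb:
//     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
//     %x = add i32 %p, 1
//
// translating the value number of %x into %pred1 rebuilds the expression as
// "add VN(%a), VN(1)".  If %pred1 already contains "%y = add i32 %a, 1", both
// map to the same number, i.e. %x is available along that edge.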

/// Erase stale entry from phiTranslate cache so phiTranslate can be computed
/// again.
void GVNPass::ValueTable::eraseTranslateCacheEntry(
    uint32_t Num, const BasicBlock &CurrBlock) {
  for (const BasicBlock *Pred : predecessors(&CurrBlock))
    PhiTranslateTable.erase({Num, Pred});
}

// In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that
// number, and then scan the list to find one whose block dominates the block
// in question.  This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t Num) {
  auto Leaders = LeaderTable.getLeaders(Num);
  if (Leaders.empty())
    return nullptr;

  Value *Val = nullptr;
  for (const auto &Entry : Leaders) {
    if (DT->dominates(Entry.BB, BB)) {
      Val = Entry.Val;
      if (isa<Constant>(Val))
        return Val;
    }
  }

  return Val;
}

/// There is an edge from 'Src' to 'Dst'.  Return
/// true if every path from the entry block to 'Dst' passes via this edge.  In
/// particular 'Dst' must not be reachable via another edge from 'Src'.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
                                       DominatorTree *DT) {
  // While in theory it is interesting to consider the case in which Dst has
  // more than one predecessor, because Dst might be part of a loop which is
  // only reachable from Src, in practice it is pointless since at the time
  // GVN runs all such loops have preheaders, which means that Dst will have
  // been changed to have only one predecessor, namely Src.
  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
  assert((!Pred || Pred == E.getStart()) &&
         "No edge between these basic blocks!");
  return Pred != nullptr;
}

void GVNPass::assignBlockRPONumber(Function &F) {
  BlockRPONumber.clear();
  uint32_t NextBlockNumber = 1;
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (BasicBlock *BB : RPOT)
    BlockRPONumber[BB] = NextBlockNumber++;
  InvalidBlockRPONumbers = false;
}

/// The given values are known to be equal in every block
/// dominated by 'Root'.  Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope.  Returns whether a change was made.
bool GVNPass::propagateEquality(
    Value *LHS, Value *RHS,
    const std::variant<BasicBlockEdge, Instruction *> &Root) {
  SmallVector<std::pair<Value *, Value *>, 4> Worklist;
  Worklist.push_back(std::make_pair(LHS, RHS));
  bool Changed = false;
  SmallVector<const BasicBlock *> DominatedBlocks;
  if (const BasicBlockEdge *Edge = std::get_if<BasicBlockEdge>(&Root)) {
    // For speed, compute a conservative fast approximation to
    // DT->dominates(Root, Root.getEnd());
    if (isOnlyReachableViaThisEdge(*Edge, DT))
      DominatedBlocks.push_back(Edge->getEnd());
  } else {
    Instruction *I = std::get<Instruction *>(Root);
    for (const auto *Node : DT->getNode(I->getParent())->children())
      DominatedBlocks.push_back(Node->getBlock());
  }

  while (!Worklist.empty()) {
    std::pair<Value*, Value*> Item = Worklist.pop_back_val();
    LHS = Item.first; RHS = Item.second;

    if (LHS == RHS)
      continue;
    assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");

    // Don't try to propagate equalities between constants.
    if (isa<Constant>(LHS) && isa<Constant>(RHS))
      continue;

    // Prefer a constant on the right-hand side, or an Argument if no
    // constants.
    if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
      std::swap(LHS, RHS);
    assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) &&
           "Unexpected value!");
    const DataLayout &DL =
        isa<Argument>(LHS)
            ? cast<Argument>(LHS)->getParent()->getDataLayout()
            : cast<Instruction>(LHS)->getDataLayout();

    // If there is no obvious reason to prefer the left-hand side over the
    // right-hand side, ensure the longest lived term is on the right-hand
    // side, so the shortest lived term will be replaced by the longest lived.
    // This tends to expose more simplifications.
    uint32_t LVN = VN.lookupOrAdd(LHS);
    if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
        (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
      // Move the 'oldest' value to the right-hand side, using the value
      // number as a proxy for age.
      uint32_t RVN = VN.lookupOrAdd(RHS);
      if (LVN < RVN) {
        std::swap(LHS, RHS);
        LVN = RVN;
      }
    }

    // If value numbering later sees that an instruction in the scope is equal
    // to 'LHS' then ensure it will be turned into 'RHS'.  In order to
    // preserve the invariant that instructions only occur in the leader table
    // for their own value number, do not do this if RHS is an instruction (if
    // an instruction in the scope is morphed into LHS then it will be turned
    // into RHS by the next GVN iteration anyway, so using the leader table is
    // about compiling faster, not optimizing better).
    if (!isa<Instruction>(RHS))
      for (const BasicBlock *BB : DominatedBlocks)
        LeaderTable.insert(LVN, RHS, BB);

    // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope.
    // As LHS always has at least one use that is not dominated by Root, this
    // will never do anything if LHS has only one use.
    if (!LHS->hasOneUse()) {
      // Create a callback that captures the DL.
      auto CanReplacePointersCallBack = [&DL](const Use &U, const Value *To) {
        return canReplacePointersInUseIfEqual(U, To, DL);
      };
      unsigned NumReplacements;
      if (const BasicBlockEdge *Edge = std::get_if<BasicBlockEdge>(&Root))
        NumReplacements = replaceDominatedUsesWithIf(
            LHS, RHS, *DT, *Edge, CanReplacePointersCallBack);
      else
        NumReplacements = replaceDominatedUsesWithIf(
            LHS, RHS, *DT, std::get<Instruction *>(Root),
            CanReplacePointersCallBack);

      if (NumReplacements > 0) {
        Changed = true;
        NumGVNEqProp += NumReplacements;
        // Cached information for anything that uses LHS will be invalid.
        if (MD)
          MD->invalidateCachedPointerInfo(LHS);
      }
    }

    // Now try to deduce additional equalities from this one. For example, if
    // the known equality was "(A != B)" == "false" then it follows that A and
    // B are equal in the scope. Only boolean equalities with an explicit true
    // or false RHS are currently supported.
    if (!RHS->getType()->isIntegerTy(1))
      // Not a boolean equality - bail out.
      continue;
    ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
    if (!CI)
      // RHS neither 'true' nor 'false' - bail out.
      continue;

    bool IsKnownTrue = CI->isMinusOne();
    bool IsKnownFalse = !IsKnownTrue;

    // If "A && B" is known true then both A and B are known true.  If "A ||
    // B" is known false then both A and B are known false.
    Value *A, *B;
    if ((IsKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
        (IsKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
      Worklist.push_back(std::make_pair(A, RHS));
      Worklist.push_back(std::make_pair(B, RHS));
      continue;
    }

    // If we are propagating an equality like "(A == B)" == "true" then also
    // propagate the equality A == B.  When propagating a comparison such as
    // "(A >= B)" == "true", replace all instances of "A < B" with "false".
    if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
      Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);

      // If "A == B" is known true, or "A != B" is known false, then replace
      // A with B everywhere in the scope.  For floating point operations, we
      // have to be careful since equality does not always imply equivalence.
      if (Cmp->isEquivalence(IsKnownFalse))
        Worklist.push_back(std::make_pair(Op0, Op1));

      // If "A >= B" is known true, replace "A < B" with false everywhere.
      CmpInst::Predicate NotPred = Cmp->getInversePredicate();
      Constant *NotVal = ConstantInt::get(Cmp->getType(), IsKnownFalse);
      // Since we don't have the instruction "A < B" immediately to hand, work
      // out the value number that it would have and use that to find an
      // appropriate instruction (if any).
      uint32_t NextNum = VN.getNextUnusedValueNumber();
      uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
      // If the number we were assigned was brand new then there is no point
      // in looking for an instruction realizing it: there cannot be one!
      if (Num < NextNum) {
        for (const auto &Entry : LeaderTable.getLeaders(Num)) {
          // Only look at leaders that either dominate the root or are
          // dominated by it.
          if (const BasicBlockEdge *Edge =
                  std::get_if<BasicBlockEdge>(&Root)) {
            if (!DT->dominates(Entry.BB, Edge->getStart()) &&
                !DT->dominates(Edge->getEnd(), Entry.BB))
              continue;
          } else {
            auto *InstBB = std::get<Instruction *>(Root)->getParent();
            if (!DT->dominates(Entry.BB, InstBB) &&
                !DT->dominates(InstBB, Entry.BB))
              continue;
          }

          Value *NotCmp = Entry.Val;
          if (NotCmp && isa<Instruction>(NotCmp)) {
            unsigned NumReplacements;
            if (const BasicBlockEdge *Edge =
                    std::get_if<BasicBlockEdge>(&Root))
              NumReplacements =
                  replaceDominatedUsesWith(NotCmp, NotVal, *DT, *Edge);
            else
              NumReplacements = replaceDominatedUsesWith(
                  NotCmp, NotVal, *DT, std::get<Instruction *>(Root));
            Changed |= NumReplacements > 0;
            NumGVNEqProp += NumReplacements;
            // Cached information for anything that uses NotCmp will be
            // invalid.
            if (MD)
              MD->invalidateCachedPointerInfo(NotCmp);
          }
        }
      }

      // Ensure that any instruction in scope that gets the "A < B" value
      // number is replaced with false.
      for (const BasicBlock *BB : DominatedBlocks)
        LeaderTable.insert(Num, NotVal, BB);

      continue;
    }

    // If LHS is "trunc nuw A to i1" and it is known true or false, then A
    // must equal 1 or 0 in A's type, since the dropped high bits are zero.
    if (match(LHS, m_NUWTrunc(m_Value(A)))) {
      Worklist.emplace_back(A, ConstantInt::get(A->getType(), IsKnownTrue));
      continue;
    }

    // If LHS is "not A", then A has the inverted known value.
    if (match(LHS, m_Not(m_Value(A)))) {
      Worklist.emplace_back(A, ConstantInt::get(A->getType(), !IsKnownTrue));
      continue;
    }
  }

  return Changed;
}
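
// Illustration: after
//
//   %cmp = icmp eq i32 %x, 0
//   br i1 %cmp, label %taken, label %other
//
// the equality "%cmp == true" holds on the edge to %taken, so %x can be
// replaced by 0 in all uses dominated by that edge, and any instruction that
// value-numbers the same as "icmp ne i32 %x, 0" is known false there.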

/// When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVNPass::processInstruction(Instruction *I) {
  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it.  Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" is simplified to "%z = and i32 %x, %x" which we now
  // simplify.
  const DataLayout &DL = I->getDataLayout();
  if (Value *V = simplifyInstruction(I, {DL, TLI, DT, AC})) {
    bool Changed = false;
    if (!I->use_empty()) {
      // Simplification can cause a special instruction to become not special.
      // For example, devirtualization to a willreturn function.
      ICF->removeUsersOf(I);
      I->replaceAllUsesWith(V);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      salvageAndRemoveInstruction(I);
      Changed = true;
    }
    if (Changed) {
      if (MD && V->getType()->isPtrOrPtrVectorTy())
        MD->invalidateCachedPointerInfo(V);
      ++NumGVNSimpl;
      return true;
    }
  }

  if (auto *Assume = dyn_cast<AssumeInst>(I))
    return processAssumeIntrinsic(Assume);

  if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
    if (processLoad(Load))
      return true;

    unsigned Num = VN.lookupOrAdd(Load);
    LeaderTable.insert(Num, Load, Load->getParent());
    return false;
  }

  if (match(I, m_Intrinsic<Intrinsic::masked_load>()) &&
      processMaskedLoad(cast<IntrinsicInst>(I)))
    return true;

  // For conditional branches, we can perform simple conditional propagation
  // on the condition value itself.
  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!BI->isConditional())
      return false;

    if (isa<Constant>(BI->getCondition()))
      return processFoldableCondBr(BI);

    Value *BranchCond = BI->getCondition();
    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);
    // Avoid multiple edges early.
    if (TrueSucc == FalseSucc)
      return false;

    BasicBlock *Parent = BI->getParent();
    bool Changed = false;

    Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
    BasicBlockEdge TrueE(Parent, TrueSucc);
    Changed |= propagateEquality(BranchCond, TrueVal, TrueE);

    Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
    BasicBlockEdge FalseE(Parent, FalseSucc);
    Changed |= propagateEquality(BranchCond, FalseVal, FalseE);

    return Changed;
  }

  // For switches, propagate the case values into the case destinations.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
    Value *SwitchCond = SI->getCondition();
    BasicBlock *Parent = SI->getParent();
    bool Changed = false;

    // Remember how many outgoing edges there are to every successor.
    SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
    for (BasicBlock *Succ : successors(Parent))
      ++SwitchEdges[Succ];

    for (const auto &Case : SI->cases()) {
      BasicBlock *Dst = Case.getCaseSuccessor();
      // If there is only a single edge, propagate the case value into it.
      if (SwitchEdges.lookup(Dst) == 1) {
        BasicBlockEdge E(Parent, Dst);
        Changed |= propagateEquality(SwitchCond, Case.getCaseValue(), E);
      }
    }
    return Changed;
  }

  // Instructions with void type don't return a value, so there's
  // no point in trying to find redundancies in them.
  if (I->getType()->isVoidTy())
    return false;

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookupOrAdd(I);

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
    LeaderTable.insert(Num, I, I->getParent());
    return false;
  }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  if (Num >= NextNum) {
    LeaderTable.insert(Num, I, I->getParent());
    return false;
  }

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  Value *Repl = findLeader(I->getParent(), Num);
  if (!Repl) {
    // Failure, just remember this instance for future use.
    LeaderTable.insert(Num, I, I->getParent());
    return false;
  }

  if (Repl == I) {
    // If I was the result of a shortcut PRE, it might already be in the
    // table and the best replacement for itself. Nothing to do.
    return false;
  }

  // Remove it!
  patchAndReplaceAllUsesWith(I, Repl);
  if (MD && Repl->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Repl);
  salvageAndRemoveInstruction(I);
  return true;
}

/// runImpl - This is the main transformation entry point for a function.
bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
                      const TargetLibraryInfo &RunTLI, AAResults &RunAA,
                      MemoryDependenceResults *RunMD, LoopInfo &LI,
                      OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
  AC = &RunAC;
  DT = &RunDT;
  VN.setDomTree(DT);
  TLI = &RunTLI;
  VN.setAliasAnalysis(&RunAA);
  MD = RunMD;
  ImplicitControlFlowTracking ImplicitCFT;
  ICF = &ImplicitCFT;
  this->LI = &LI;
  VN.setMemDep(MD);
  VN.setMemorySSA(MSSA);
  ORE = RunORE;
  InvalidBlockRPONumbers = true;
  MemorySSAUpdater Updater(MSSA);
  MSSAU = MSSA ? &Updater : nullptr;

  bool Changed = false;
  bool ShouldContinue = true;

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
    bool RemovedBlock = MergeBlockIntoPredecessor(&BB, &DTU, &LI, MSSAU, MD);
    if (RemovedBlock)
      ++NumGVNBlocks;

    Changed |= RemovedBlock;
  }
  DTU.flush();

  unsigned Iteration = 0;
  while (ShouldContinue) {
    LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    (void) Iteration;
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (isPREEnabled()) {
    // Fabricate val-num for dead-code in order to suppress assertion in
    // performPRE().
    assignValNumForDeadCode();
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();
  // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for every
  // iteration.
  DeadBlocks.clear();

  if (MSSA && VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  return Changed;
}

bool GVNPass::processBlock(BasicBlock *BB) {
  if (DeadBlocks.count(BB))
    return false;

  bool ChangedFunction = false;

  // Since we may not have visited the input blocks of the phis, we can't
  // use our normal hash approach for phis.  Instead, simply look for
  // obvious duplicates.  The first pass of GVN will tend to create
  // identical phis, and the second or later passes can eliminate them.
  SmallPtrSet<PHINode *, 8> PHINodesToRemove;
  ChangedFunction |= EliminateDuplicatePHINodes(BB, PHINodesToRemove);
  for (PHINode *PN : PHINodesToRemove) {
    removeInstruction(PN);
  }
  for (Instruction &Inst : make_early_inc_range(*BB))
    ChangedFunction |= processInstruction(&Inst);
  return ChangedFunction;
}

// Instantiate an expression in a predecessor that lacked it.
bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
                                        BasicBlock *Curr, unsigned int ValNo) {
  // Because we are going top-down through the block, all value numbers
  // will be available in the predecessor by the time we need them.  Any
  // that weren't originally present will have been instantiated earlier
  // in this loop.
  bool Success = true;
  for (unsigned I = 0, E = Instr->getNumOperands(); I != E; ++I) {
    Value *Op = Instr->getOperand(I);
    if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
      continue;
    // This could be a newly inserted instruction, in which case, we won't
    // find a value number, and should give up before we hurt ourselves.
    // FIXME: Rewrite the infrastructure to let it easier to value number
    // and process newly inserted instructions.
    if (!VN.exists(Op)) {
      Success = false;
      break;
    }
    uint32_t TValNo =
        VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
    if (Value *V = findLeader(Pred, TValNo)) {
      Instr->setOperand(I, V);
    } else {
      Success = false;
      break;
    }
  }

  // Fail out if we encounter an operand that is not available in
  // the PRE predecessor.  This is typically because of loads which
  // are not value numbered precisely.
  if (!Success)
    return false;

  Instr->insertBefore(Pred->getTerminator()->getIterator());
  Instr->setName(Instr->getName() + ".pre");
  Instr->setDebugLoc(Instr->getDebugLoc());

  ICF->insertInstructionTo(Instr, Pred);

  unsigned Num = VN.lookupOrAdd(Instr);
  VN.add(Instr, Num);

  // Update the availability map to include the new instruction.
  LeaderTable.insert(Num, Instr, Pred);
  return true;
}

bool GVNPass::performScalarPRE(Instruction *CurInst) {
  if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
      isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
      CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
      isa<DbgInfoIntrinsic>(CurInst))
    return false;

  // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
  // sinking the compare again, and it would force the code generator to
  // move the i1 from processor flags or predicate registers into a general
  // purpose register.
  if (isa<CmpInst>(CurInst))
    return false;

  // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
  // sinking the addressing mode computation back to its uses. Extending the
  // GEP's live range increases the register pressure, and therefore it can
  // introduce unnecessary spills.
  //
  // This doesn't prevent Load PRE. PHI translation will make the GEP available
  // to the load by moving the GEP to the predecessor.
  if (isa<GetElementPtrInst>(CurInst))
    return false;

  if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
    // We don't currently value number ANY inline asm calls.
    if (CallB->isInlineAsm())
      return false;
  }

  uint32_t ValNo = VN.lookup(CurInst);

  // Look for the predecessors for PRE opportunities.  We're
  // only trying to solve the basic diamond case, where
  // a value is computed in the successor and one predecessor,
  // but not the other.  We also explicitly disallow cases
  // where the successor is its own predecessor, because they're
  // more complicated to get right.
  unsigned NumWith = 0;
  unsigned NumWithout = 0;
  BasicBlock *PREPred = nullptr;
  BasicBlock *CurrentBlock = CurInst->getParent();

  // Update the RPO numbers for this function.
  if (InvalidBlockRPONumbers)
    assignBlockRPONumber(*CurrentBlock->getParent());

  SmallVector<std::pair<Value *, BasicBlock *>, 8> PredMap;
  for (BasicBlock *P : predecessors(CurrentBlock)) {
    // We're not interested in PRE where blocks with predecessors that are
    // not reachable.
    if (!DT->isReachableFromEntry(P)) {
      NumWithout = 2;
      break;
    }
    // It is not safe to do PRE when P->CurrentBlock is a loop backedge.
    assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
           "Invalid BlockRPONumber map.");
    if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock]) {
      NumWithout = 2;
      break;
    }

    uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
    Value *PredV = findLeader(P, TValNo);
    if (!PredV) {
      PredMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
      PREPred = P;
      ++NumWithout;
    } else if (PredV == CurInst) {
      // CurInst dominates this predecessor.
      NumWithout = 2;
      break;
    } else {
      PredMap.push_back(std::make_pair(PredV, P));
      ++NumWith;
    }
  }

  // Don't do PRE when it might increase code size, i.e. when
  // we would need to insert instructions in more than one pred.
  if (NumWithout > 1 || NumWith == 0)
    return false;

  // We may have a case where all predecessors have the instruction,
  // and we just need to insert a phi node. Otherwise, perform
  // insertion.
  Instruction *PREInstr = nullptr;

  if (NumWithout != 0) {
    if (!isSafeToSpeculativelyExecute(CurInst)) {
      // It is only valid to insert a new instruction if the current
      // instruction is always executed. An instruction with implicit control
      // flow could prevent us from doing it. If we cannot speculate the
      // execution, then PRE should be prohibited.
      if (ICF->isDominatedByICFIFromSameBlock(CurInst))
        return false;
    }

    // Don't do PRE across indirect branch.
    if (isa<IndirectBrInst>(PREPred->getTerminator()))
      return false;

    // We can't do PRE safely on a critical edge, so instead we schedule
    // the edge to be split and perform the PRE the next time we iterate
    // on the function.
    unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
    if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
      ToSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
      return false;
    }

    PREInstr = CurInst->clone();
    if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
      // If we failed insertion, make sure we remove the instruction.
#ifndef NDEBUG
      verifyRemoved(PREInstr);
#endif
      PREInstr->deleteValue();
      return false;
    }
  }

  // Either we should have filled in the PRE instruction, or we should
  // not have needed insertions.
  assert(PREInstr != nullptr || NumWithout == 0);

  ++NumGVNPRE;

  // Create a PHI to make the value available in this block.
  PHINode *Phi = PHINode::Create(CurInst->getType(), PredMap.size(),
                                 CurInst->getName() + ".pre-phi");
  Phi->insertBefore(CurrentBlock->begin());
  for (auto &[V, BB] : PredMap) {
    if (V) {
      // If we use an existing value in this phi, we have to patch the
      // original value because the phi will be used to replace a later value.
      patchReplacementInstruction(CurInst, V);
      Phi->addIncoming(V, BB);
    } else
      Phi->addIncoming(PREInstr, PREPred);
  }

  VN.add(Phi, ValNo);
  // After creating a new PHI for ValNo, the phi translate result for ValNo
  // will be changed, so erase the related stale entries in phi translate
  // cache.
  VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
  LeaderTable.insert(ValNo, Phi, CurrentBlock);
  Phi->setDebugLoc(CurInst->getDebugLoc());
  CurInst->replaceAllUsesWith(Phi);
  if (MD && Phi->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Phi);
  LeaderTable.erase(ValNo, CurInst, CurrentBlock);

  LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
  removeInstruction(CurInst);
  ++NumGVNInstr;

  return true;
}
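
// Illustration: if "%t = mul i32 %a, %b" is computed in %pred1 but not in
// %pred2, and "%u = mul i32 %a, %b" is computed in their common successor,
// scalar PRE clones the multiply into %pred2 (as "%u.pre") and replaces %u
// with
//
//   %u.pre-phi = phi i32 [ %t, %pred1 ], [ %u.pre, %pred2 ]
//
// so the expression is evaluated once per path instead of twice along the
// %pred1 path.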

/// Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVNPass::performPRE(Function &F) {
  bool Changed = false;
  for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock())
      continue;

    // Don't perform PRE on an EH pad.
    if (CurrentBlock->isEHPad())
      continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
                              BE = CurrentBlock->end();
         BI != BE;) {
      Instruction *CurInst = &*BI++;
      Changed |= performScalarPRE(CurInst);
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// Split the critical edge connecting the given two blocks, and return
/// the block inserted to the critical edge.
BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
  // GVN does not require loop-simplify, do not try to preserve it if it is
  // not possible.
  BasicBlock *BB = SplitCriticalEdge(
      Pred, Succ,
      CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
  if (BB) {
    if (MD)
      MD->invalidateCachedPredecessors();
    InvalidBlockRPONumbers = true;
  }
  return BB;
}

/// Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVNPass::splitCriticalEdges() {
  if (ToSplit.empty())
    return false;

  bool Changed = false;
  do {
    std::pair<Instruction *, unsigned> Edge = ToSplit.pop_back_val();
    Changed |= SplitCriticalEdge(Edge.first, Edge.second,
                                 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
               nullptr;
  } while (!ToSplit.empty());
  if (Changed) {
    if (MD)
      MD->invalidateCachedPredecessors();
    InvalidBlockRPONumbers = true;
  }
  return Changed;
}

/// Executes one iteration of GVN.
bool GVNPass::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
  // Needed for value numbering with phi construction to work.
  // RPOT walks the graph in its constructor and will not be invalidated
  // during processBlock.
  ReversePostOrderTraversal<Function *> RPOT(&F);

  for (BasicBlock *BB : RPOT)
    Changed |= processBlock(BB);

  return Changed;
}

void GVNPass::cleanupGlobalSets() {
  VN.clear();
  LeaderTable.clear();
  BlockRPONumber.clear();
  ICF->clear();
  InvalidBlockRPONumbers = true;
}

void GVNPass::removeInstruction(Instruction *I) {
  VN.erase(I);
  if (MD) MD->removeInstruction(I);
  if (MSSAU)
    MSSAU->removeMemoryAccess(I);
#ifndef NDEBUG
  verifyRemoved(I);
#endif
  ICF->removeInstruction(I);
  I->eraseFromParent();
}

/// Verify that the specified instruction does not occur in our
/// internal data structures.
void GVNPass::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);
  LeaderTable.verifyRemoved(Inst);
}

/// BB is declared dead, which implied other blocks become dead as well. This
/// function is to add all these blocks to "DeadBlocks". For the dead blocks'
/// live successors, update their phi nodes by replacing the operands
/// corresponding to dead blocks with UndefVal.
void GVNPass::addDeadBlock(BasicBlock *BB) {
  SmallVector<BasicBlock *, 4> NewDead;
  SmallSetVector<BasicBlock *, 4> DF;

  NewDead.push_back(BB);
  while (!NewDead.empty()) {
    BasicBlock *D = NewDead.pop_back_val();
    if (DeadBlocks.count(D))
      continue;

    // All blocks dominated by D are dead.
    SmallVector<BasicBlock *, 8> Dom;
    DT->getDescendants(D, Dom);
    DeadBlocks.insert_range(Dom);

    // Figure out the dominance-frontier(D).
    for (BasicBlock *B : Dom) {
      for (BasicBlock *S : successors(B)) {
        if (DeadBlocks.count(S))
          continue;

        bool AllPredDead = true;
        for (BasicBlock *P : predecessors(S))
          if (!DeadBlocks.count(P)) {
            AllPredDead = false;
            break;
          }

        if (!AllPredDead) {
          // S could be proved dead later on. That is why we don't update phi
          // operands at this moment.
          DF.insert(S);
        } else {
          // While S is not dominated by D, it is dead by now. This could take
          // place if S already have a dead predecessor before D is declared
          // dead.
          NewDead.push_back(S);
        }
      }
    }
  }

  // For the dead blocks' live successors, update their phi nodes by replacing
  // the operands corresponding to dead blocks with UndefVal.
  for (BasicBlock *B : DF) {
    if (DeadBlocks.count(B))
      continue;

    // First, split the critical edges. This might also create additional
    // blocks to preserve LoopSimplify form and adjust edges accordingly.
    SmallVector<BasicBlock *, 4> Preds(predecessors(B));
    for (BasicBlock *P : Preds) {
      if (!DeadBlocks.count(P))
        continue;

      if (llvm::is_contained(successors(P), B) &&
          isCriticalEdge(P->getTerminator(), B)) {
        if (BasicBlock *S = splitCriticalEdges(P, B))
          DeadBlocks.insert(P = S);
      }
    }

    // Now poison the incoming values from the dead predecessors.
    for (BasicBlock *P : predecessors(B)) {
      if (!DeadBlocks.count(P))
        continue;
      for (PHINode &Phi : B->phis()) {
        Phi.setIncomingValueForBlock(P, PoisonValue::get(Phi.getType()));
        if (MD)
          MD->invalidateCachedPointerInfo(&Phi);
      }
    }
  }
}
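
// Illustration: if a folded branch proves %dead unreachable, every block
// dominated by %dead is added to DeadBlocks, and a live successor with an
// incoming edge from the dead region has its PHIs rewritten, e.g.
//
//   %v = phi i32 [ %x, %dead ], [ %y, %live.pred ]
// becomes
//   %v = phi i32 [ poison, %dead ], [ %y, %live.pred ]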

// If the given branch is recognized as a foldable branch (i.e. conditional
// branch with constant condition), it will perform following analyses and
// transformation.
//  1) If the dead out-coming edge is a critical-edge, split it. Let
//     R be the target of the dead out-coming edge.
//  1) Identify the set of dead blocks implied by the branch's dead outcoming
//     edge. The result of this step will be {X| X is dominated by R}
//  2) Identify those blocks which haves at least one dead predecessor. The
//     result of this step will be dominance-frontier(R).
//  3) Update the PHIs in DF(R) by replacing the operands corresponding to
//     dead blocks with "UndefVal" in an hope these PHIs will optimized away.
//
// Return true iff *NEW* dead code are found.
bool GVNPass::processFoldableCondBr(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return false;

  // If a branch has two identical successors, we cannot declare either dead.
  if (BI->getSuccessor(0) == BI->getSuccessor(1))
    return false;

  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return false;

  BasicBlock *DeadRoot =
      Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
  if (DeadBlocks.count(DeadRoot))
    return false;

  if (!DeadRoot->getSinglePredecessor())
    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
  return true;
}

// performPRE() will trigger assert if it comes across an instruction without
// associated val-num. As it normally has far more live instructions than dead
// instructions, it makes more sense just to "fabricate" a val-number for the
// dead code than checking if instruction involved is dead or not.
void GVNPass::assignValNumForDeadCode() {
  for (BasicBlock *BB : DeadBlocks) {
    for (Instruction &Inst : *BB) {
      unsigned ValNum = VN.lookupOrAdd(&Inst);
      LeaderTable.insert(ValNum, &Inst, BB);
    }
  }
}

class llvm::gvn::GVNLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid.

  explicit GVNLegacyPass(bool MemDepAnalysis = GVNEnableMemDep,
                         bool MemSSAAnalysis = GVNEnableMemorySSA)
      : FunctionPass(ID), Impl(GVNOptions()
                                   .setMemDep(MemDepAnalysis)
                                   .setMemorySSA(MemSSAAnalysis)) {
    initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    if (Impl.isMemorySSAEnabled() && !MSSAWP)
      MSSAWP = &getAnalysis<MemorySSAWrapperPass>();

    return Impl.runImpl(
        F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
        getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
        getAnalysis<AAResultsWrapperPass>().getAAResults(),
        Impl.isMemDepEnabled()
            ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
            : nullptr,
        getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
        MSSAWP ? &MSSAWP->getMSSA() : nullptr);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    if (Impl.isMemDepEnabled())
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<OptimizationRemarkEmitterWrapperPass>();
    if (Impl.isMemorySSAEnabled())
      AU.addRequired<MemorySSAWrapperPass>();
  }

private:
  GVNPass Impl;
};

char GVNLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false,
                    false)

// The public interface to this file.
FunctionPass *llvm::createGVNPass() { return new GVNLegacyPass(); }
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
This file implements a map that provides insertion order iteration.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
uint64_t IntrinsicInst * II
ppc ctr loops PowerPC CTR Loops Verify
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
std::pair< BasicBlock *, BasicBlock * > Edge
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static const uint32_t IV[8]
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
bool isEHPad() const
Return true if this basic block is an exception handling block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Analysis pass which computes a DominatorTree.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
Represents calls to the gc.relocate intrinsic.
This class holds the mapping between values and value numbers.
LLVM_ABI uint32_t lookupOrAdd(MemoryAccess *MA)
Definition GVN.cpp:642
The core GVN pass object.
friend class gvn::GVNLegacyPass
LLVM_ABI bool isPREEnabled() const
Definition GVN.cpp:853
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
Definition GVN.cpp:878
LLVM_ABI void salvageAndRemoveInstruction(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
Definition GVN.cpp:930
AAResults * getAliasAnalysis() const
LLVM_ABI bool isLoadPREEnabled() const
Definition GVN.cpp:857
GVNPass(GVNOptions Options={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition GVN.cpp:910
LLVM_ABI bool isMemorySSAEnabled() const
Definition GVN.cpp:874
DominatorTree & getDominatorTree() const
LLVM_ABI bool isLoadInLoopPREEnabled() const
Definition GVN.cpp:861
LLVM_ABI bool isLoadPRESplitBackedgeEnabled() const
Definition GVN.cpp:865
LLVM_ABI bool isMemDepEnabled() const
Definition GVN.cpp:870
Legacy wrapper pass to provide the GlobalsAAResult object.
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
LLVM_ABI bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
LLVM_ABI void dropUnknownNonDebugMetadata(ArrayRef< unsigned > KnownIDs={})
Drop all unknown metadata except for debug locations.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
The legacy pass manager's analysis pass to compute loop information.
iterator find(const KeyT &Key)
A memory dependence query can return one of three different answers.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block,...
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
bool isLocal() const
Tests if this MemDepResult represents a valid local query (Clobber/Def).
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
This is the common base class for memset/memcpy/memmove.
BasicBlock * getBlock() const
An analysis that produces MemoryDependenceResults for a function.
std::vector< NonLocalDepEntry > NonLocalDepInfo
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
const NonLocalDepInfo & getNonLocalCallDependency(CallBase *QueryCall)
Perform a full dependency query for the specified call, returning the set of blocks that the value is...
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance.
Representation for a specific memory location.
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number i.
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number x.
An analysis that produces MemorySSA for a function.
Legacy analysis pass which computes MemorySSA.
LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
This is an entry in the NonLocalDepInfo cache.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable() - Subclasses use this function to get analysis information tha...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Helper class for SSA formation on a set of values defined in multiple blocks.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
This class represents the LLVM 'select' instruction.
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
SmallVector & operator=(const SmallVector &RHS)
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetLibraryInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
bool hasUseList() const
Check if this Value has a use-list.
LLVM_ABI bool canBeFreed() const
Return true if the memory object referred to by V can by freed in the scope for which the SSA value d...
LLVM_ABI void deleteValue()
Delete a pointer to a generic Value.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
An efficient, type-erasing, non-owning reference to a callable.
GVNLegacyPass(bool MemDepAnalysis=GVNEnableMemDep, bool MemSSAAnalysis=GVNEnableMemorySSA)
Definition GVN.cpp:3337
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition GVN.cpp:3366
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
Definition GVN.cpp:3345
static char ID
Definition GVN.cpp:3335
An opaque object representing a hash code.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_MaskedStore(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
Matches MaskedStore Intrinsic.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_IntrinsicIntrinsic::fabs(m_Value(X))
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at D...
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform...
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at De...
Value * getValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, Function *F)
If analyzeLoadFromClobberingStore/Load returned an offset, this function can be used to actually perf...
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory int...
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, Function *F)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
initializer< Ty > init(const Ty &Val)
A private "module" namespace for types and utilities used by GVN.
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
NodeAddr< UseNode * > Use
NodeAddr< NodeBase * > Node
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
hash_code hash_value(const FixedPointSemantics &Val)
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
LLVM_ABI unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
LLVM_ABI unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instru...
auto pred_end(const MachineBasicBlock *BB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast - Return the argument parameter cast to the specified type.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
constexpr from_range_t from_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
LLVM_ABI bool canReplacePointersInUseIfEqual(const Use &U, const Value *To, const DataLayout &DL)
LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value \To if they are deeme...
bool isModSet(const ModRefInfo MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
LLVM_ABI void initializeGVNLegacyPassPass(PassRegistry &)
LLVM_ABI unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa - Return true if the parameter to the template is an instance of one of the template type argu...
@ Success
The lock was released successfully.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_end(NodeRef Node)
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
LLVM_ABI FunctionPass * createGVNPass()
Create a legacy GVN pass.
Definition GVN.cpp:3402
FunctionAddr VTableAddr Next
DWARFExpression::Operation Op
LLVM_ABI BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
LLVM_ABI bool isCriticalEdge(const Instruction *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
constexpr unsigned BitWidth
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast - Return the argument parameter cast to the specified type.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool pred_empty(const BasicBlock *BB)
iterator_range< df_iterator< T > > depth_first(const T &G)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
LLVM_ABI bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
LLVM_ABI bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static GVNPass::Expression getTombstoneKey()
Definition GVN.cpp:175
static bool isEqual(const GVNPass::Expression &LHS, const GVNPass::Expression &RHS)
Definition GVN.cpp:183
static GVNPass::Expression getEmptyKey()
Definition GVN.cpp:174
static unsigned getHashValue(const GVNPass::Expression &E)
Definition GVN.cpp:177
An information struct used to provide DenseMap with the various necessary components for a given valu...
A set of parameters to control various transforms performed by GVN pass.
bool Commutative
Definition GVN.cpp:142
bool operator==(const Expression &Other) const
Definition GVN.cpp:152
uint32_t Opcode
Definition GVN.cpp:141
friend hash_code hash_value(const Expression &Value)
Definition GVN.cpp:167
Type * Ty
Definition GVN.cpp:145
SmallVector< uint32_t, 4 > VarArgs
Definition GVN.cpp:146
AttributeList Attrs
Definition GVN.cpp:148
Expression(uint32_t Op=~2U)
Definition GVN.cpp:150
A CRTP mix-in to automatically provide informational APIs needed for passes.
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock.
Definition GVN.cpp:289
static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset=0)
Definition GVN.cpp:303
static AvailableValueInBlock getUndef(BasicBlock *BB)
Definition GVN.cpp:308
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
Definition GVN.cpp:296
AvailableValue AV
AV - The actual available value.
Definition GVN.cpp:294
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel, Value *V1, Value *V2)
Definition GVN.cpp:312
BasicBlock * BB
BB - The basic block in question.
Definition GVN.cpp:291
Value * MaterializeAdjustedValue(LoadInst *Load) const
Emit code at the end of this block to adjust the value defined here to the specified type.
Definition GVN.cpp:319
Represents a particular available value that we know how to materialize.
Definition GVN.cpp:193
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
Definition GVN.cpp:210
bool isSimpleValue() const
Definition GVN.cpp:256
Value * V2
Definition GVN.cpp:212
static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2)
Definition GVN.cpp:246
bool isCoercedLoadValue() const
Definition GVN.cpp:257
ValType
Definition GVN.cpp:194
@ SimpleVal
Definition GVN.cpp:195
@ SelectVal
Definition GVN.cpp:200
@ LoadVal
Definition GVN.cpp:196
@ UndefVal
Definition GVN.cpp:198
@ MemIntrin
Definition GVN.cpp:197
static AvailableValue get(Value *V, unsigned Offset=0)
Definition GVN.cpp:214
ValType Kind
Kind of the live-out value.
Definition GVN.cpp:207
LoadInst * getCoercedLoadValue() const
Definition GVN.cpp:267
static AvailableValue getLoad(LoadInst *Load, unsigned Offset=0)
Definition GVN.cpp:230
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
Definition GVN.cpp:222
bool isUndefValue() const
Definition GVN.cpp:259
bool isSelectValue() const
Definition GVN.cpp:260
Value * Val
Val - The value that is live out of the block.
Definition GVN.cpp:205
Value * V1
V1, V2 - The dominating non-clobbered values of SelectVal.
Definition GVN.cpp:212
static AvailableValue getUndef()
Definition GVN.cpp:238
SelectInst * getSelectValue() const
Definition GVN.cpp:277
Value * getSimpleValue() const
Definition GVN.cpp:262
bool isMemIntrinValue() const
Definition GVN.cpp:258
MemIntrinsic * getMemIntrinValue() const
Definition GVN.cpp:272
Value * MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt) const
Emit code at the specified insertion point to adjust the value defined here to the specified type.
Definition GVN.cpp:1145