LLVM: lib/Transforms/Scalar/EarlyCSE.cpp Source File
//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumCSEGEP, "Number of GEP instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");

static cl::opt<unsigned> EarlyCSEMssaOptCap(
    "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
    cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

static cl::opt<bool> EarlyCSEDebugHash(
    "earlycse-debug-hash", cl::init(false), cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    // Also handled are constrained intrinsics that look like the types
    // of instruction handled below (UnaryOperator, etc.).
    if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
      if (Function *F = CI->getCalledFunction()) {
        switch ((Intrinsic::ID)F->getIntrinsicID()) {
        case Intrinsic::experimental_constrained_fadd:
        case Intrinsic::experimental_constrained_fsub:
        case Intrinsic::experimental_constrained_fmul:
        case Intrinsic::experimental_constrained_fdiv:
        case Intrinsic::experimental_constrained_frem:
        case Intrinsic::experimental_constrained_fptosi:
        case Intrinsic::experimental_constrained_sitofp:
        case Intrinsic::experimental_constrained_fptoui:
        case Intrinsic::experimental_constrained_uitofp:
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps: {
          auto *CFP = cast<ConstrainedFPIntrinsic>(CI);
          if (CFP->getExceptionBehavior() &&
              CFP->getExceptionBehavior() == fp::ebStrict)
            return false;
          // Since we CSE across function calls we must not allow
          // the rounding mode to change.
          if (CFP->getRoundingMode() &&
              CFP->getRoundingMode() == RoundingMode::Dynamic)
            return false;
          return true;
        }
        }
      }
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy() &&
             // FIXME: Currently the calls which may access the thread id may
             // be considered as not accessing the memory. But this is
             // problematic for coroutines, since coroutines may resume in a
             // different thread. So we disable the optimization here for the
             // correctness. However, it may not be a complete solution. We
             // should consider whether we should deal with the intrinsic
             // calls which access the thread id in a general way.
             !CI->getFunction()->isPresplitCoroutine();
    }
    return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) ||
           isa<BinaryOperator>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst) ||
           isa<FreezeInst>(Inst);
  }
};

} // end anonymous namespace

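// Illustrative sketch of the effect (example IR, not from this file): with the
// definitions above, two syntactically identical pure instructions such as
//
//   %a = add i32 %x, %y
//   %b = add i32 %x, %y
//
// map to the same SimpleValue key, so the pass rewrites uses of %b to %a and
// deletes %b when %a dominates it.
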
namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm

/// Match a 'select' including an optional 'not's of the condition.
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
                                           Value *&B,
                                           SelectPatternFlavor &Flavor) {
  // Return false if V is not even a select.
  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
    return false;

  // Look through a 'not' of the condition operand by swapping A/B.
  Value *CondNot;
  if (match(Cond, m_Not(m_Value(CondNot)))) {
    Cond = CondNot;
    std::swap(A, B);
  }

  // Match canonical forms of min/max. We are not using ValueTracking's
  // more powerful matchSelectPattern() because it may rely on instruction
  // flags such as "nsw". That would be incompatible with the current hashing
  // mechanism that may remove flags to increase the likelihood of CSE.

  Flavor = SPF_UNKNOWN;
  CmpPredicate Pred;

  if (!match(Cond, m_ICmp(Pred, m_Specific(A), m_Specific(B)))) {
    // Check for commuted variants of min/max by swapping the predicate.
    // If we do not match the standard or commuted patterns, this is not a
    // recognized form of min/max, but it is still a select, so return true.
    if (!match(Cond, m_ICmp(Pred, m_Specific(B), m_Specific(A))))
      return true;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  switch (Pred) {
  case CmpInst::ICMP_UGT: Flavor = SPF_UMAX; break;
  case CmpInst::ICMP_ULT: Flavor = SPF_UMIN; break;
  case CmpInst::ICMP_SGT: Flavor = SPF_SMAX; break;
  case CmpInst::ICMP_SLT: Flavor = SPF_SMIN; break;
  // Non-strict inequalities.
  case CmpInst::ICMP_ULE: Flavor = SPF_UMIN; break;
  case CmpInst::ICMP_UGE: Flavor = SPF_UMAX; break;
  case CmpInst::ICMP_SLE: Flavor = SPF_SMIN; break;
  case CmpInst::ICMP_SGE: Flavor = SPF_SMAX; break;
  default: break;
  }

  return true;
}
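
// Illustrative example (example IR, not from this file): both selects below
// match with the same condition, operands, and flavor (SPF_SMAX); the second
// is recognized by looking through the 'not' and swapping A/B:
//
//   %c  = icmp sgt i32 %a, %b
//   %s  = select i1 %c, i32 %a, i32 %b   ; smax(%a, %b)
//
//   %nc = xor i1 %c, true
//   %t  = select i1 %nc, i32 %b, i32 %a  ; also smax(%a, %b)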

static unsigned hashCallInst(CallInst *CI) {
  // Don't CSE convergent calls in different basic blocks, because they
  // implicitly depend on the set of threads that is currently executing.
  if (CI->isConvergent()) {
    return hash_combine(
        CI->getOpcode(), CI->getParent(),
        hash_combine_range(CI->value_op_begin(), CI->value_op_end()));
  }
  return hash_combine(
      CI->getOpcode(),
      hash_combine_range(CI->value_op_begin(), CI->value_op_end()));
}

static unsigned getHashValueImpl(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    // Compares can be commuted by swapping the comparands and
    // updating the predicate.  Choose the form that has the
    // comparands in sorted order, or in the case of a tie, the
    // one with the lower predicate.
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    CmpInst::Predicate SwappedPred = CI->getSwappedPredicate();
    if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
      std::swap(LHS, RHS);
      Pred = SwappedPred;
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash min/max (cmp + select) to allow for commuted operands.
  // Min/max may also have non-canonical compare predicate (eg, the compare for
  // smin may use 'sgt' rather than 'slt'), and non-canonical operands in the
  // compare.
  SelectPatternFlavor SPF;
  Value *Cond, *A, *B;
  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
    // TODO: We should also detect FP min/max.
    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
        SPF == SPF_UMIN || SPF == SPF_UMAX) {
      if (A > B)
        std::swap(A, B);
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }

    // Hash general selects to allow matching commuted true/false operands.

    // If we do not have a compare as the condition, just hash in the
    // condition.
    CmpPredicate Pred;
    Value *X, *Y;
    if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
      return hash_combine(Inst->getOpcode(), Cond, A, B);

    // Similar to cmp normalization (above) - canonicalize the predicate value:
    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
    if (CmpInst::getInversePredicate(Pred) < Pred) {
      Pred = CmpInst::getInversePredicate(Pred);
      std::swap(A, B);
    }
    return hash_combine(Inst->getOpcode(),
                        static_cast<CmpInst::Predicate>(Pred), X, Y, A, B);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (FreezeInst *FI = dyn_cast<FreezeInst>(Inst))
    return hash_combine(FI->getOpcode(), FI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<ExtractElementInst>(Inst) ||
          isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
          isa<UnaryOperator>(Inst) || isa<FreezeInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Handle intrinsics with commutative operands.
  auto *II = dyn_cast<IntrinsicInst>(Inst);
  if (II && II->isCommutative() && II->arg_size() >= 2) {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    if (LHS > RHS)
      std::swap(LHS, RHS);
    return hash_combine(
        II->getOpcode(), LHS, RHS,
        hash_combine_range(II->value_op_begin() + 2, II->value_op_end()));
  }

  // gc.relocate is 'special' call: its second and third operands are
  // not real values, but indices into statepoint's argument list.
  // Get values they point to.
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(Inst))
    return hash_combine(GCR->getOpcode(), GCR->getOperand(0),
                        GCR->getBasePtr(), GCR->getDerivedPtr());

  // Don't CSE convergent calls in different basic blocks, because they
  // implicitly depend on the set of threads that is currently executing.
  if (CallInst *CI = dyn_cast<CallInst>(Inst))
    return hashCallInst(CI);

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
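
// Illustrative consequence (example IR, not from this file): the commutative
// canonicalization above guarantees that
//
//   %a = add i32 %x, %y
//   %b = add i32 %y, %x
//
// hash to the same value, so the hash-table lookup can find %a when %b is
// processed; isEqual below then accepts the commuted operand order.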

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
#ifndef NDEBUG
  // If -earlycse-debug-hash was specified, return a constant -- this
  // will force all hashing to collide, so we'll exhaustively search
  // the table for a match, and the assertion in isEqual will fire if
  // there's a bug causing equal keys to hash differently.
  if (EarlyCSEDebugHash)
    return 0;
#endif
  return getHashValueImpl(Val);
}

static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI, /*IntersectAttrs=*/true)) {
    // Convergent calls implicitly depend on the set of threads that is
    // currently executing, so conservatively return false if they are in
    // different basic blocks.
    if (CallInst *CI = dyn_cast<CallInst>(LHSI);
        CI && CI->isConvergent() && LHSI->getParent() != RHSI->getParent())
      return false;

    return true;
  }

  // If we're not strictly identical, we still might be a commutable instruction
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  auto *LII = dyn_cast<IntrinsicInst>(LHSI);
  auto *RII = dyn_cast<IntrinsicInst>(RHSI);
  if (LII && RII && LII->getIntrinsicID() == RII->getIntrinsicID() &&
      LII->isCommutative() && LII->arg_size() >= 2) {
    return LII->getArgOperand(0) == RII->getArgOperand(1) &&
           LII->getArgOperand(1) == RII->getArgOperand(0) &&
           std::equal(LII->arg_begin() + 2, LII->arg_end(),
                      RII->arg_begin() + 2, RII->arg_end());
  }

  // See comment above in `getHashValue()`.
  if (const GCRelocateInst *GCR1 = dyn_cast<GCRelocateInst>(LHSI))
    if (const GCRelocateInst *GCR2 = dyn_cast<GCRelocateInst>(RHSI))
      return GCR1->getOperand(0) == GCR2->getOperand(0) &&
             GCR1->getBasePtr() == GCR2->getBasePtr() &&
             GCR1->getDerivedPtr() == GCR2->getDerivedPtr();

  // Min/max can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  // Selects can be non-trivially equivalent via inverted conditions and swaps.
  SelectPatternFlavor LSPF, RSPF;
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
    if (LSPF == RSPF) {
      // TODO: We should also detect FP min/max.
      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
        return ((LHSA == RHSA && LHSB == RHSB) ||
                (LHSA == RHSB && LHSB == RHSA));

      // select Cond, A, B <--> select Cond, A, B
      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
        return true;
    }

    // If the true/false operands are swapped and the conditions are compares
    // with inverted predicates, the selects are equal:
    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
    //
    // This also handles patterns with a double-negation in the sense of not +
    // inverse, because we looked through a 'not' in the matching function and
    // swapped A/B:
    // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
    //
    // This intentionally does NOT handle patterns with a double-negation in
    // the sense of not + not, because doing so could result in values
    // comparing as equal that hash differently in the min/max cases like:
    // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
    //   ^ hashes as min                  ^ would not hash as min
    // In the context of the EarlyCSE pass, however, such cases never reach
    // this code, as we simplify the double-negation before hashing the second
    // select (and so still succeed at CSEing them).
    if (LHSA == RHSB && LHSB == RHSA) {
      CmpPredicate PredL, PredR;
      Value *X, *Y;
      if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
          match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
          CmpInst::getInversePredicate(PredL) == PredR)
        return true;
    }
  }

  return false;
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  // These comparisons are nontrivial, so assert that equality implies
  // hash equality (DenseMap demands this as an invariant).
  bool Result = isEqualImpl(LHS, RHS);
  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
         getHashValueImpl(LHS) == getHashValueImpl(RHS));
  return Result;
}
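
// Illustrative consequence of the commuted-equality rules above (example IR,
// not from this file):
//
//   %c1 = icmp ult i32 %x, %y
//   %c2 = icmp ugt i32 %y, %x
//
// compare equal (swapped operands with the swapped predicate), so the second
// compare is CSE'd to the first.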

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory() ||
        // FIXME: Currently the calls which may access the thread id may
        // be considered as not accessing the memory. But this is
        // problematic for coroutines, since coroutines may resume in a
        // different thread. So we disable the optimization here for the
        // correctness. However, it may not be a complete solution. We
        // should consider whether we should deal with the intrinsic
        // calls which access the thread id in a general way.
        CI->getFunction()->isPresplitCoroutine())
      return false;
    return true;
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hashCallInst(cast<CallInst>(Inst));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHS.Inst == RHS.Inst;

  CallInst *LHSI = cast<CallInst>(LHS.Inst);
  CallInst *RHSI = cast<CallInst>(RHS.Inst);

  // Convergent calls implicitly depend on the set of threads that is
  // currently executing, so conservatively return false if they are in
  // different basic blocks.
  if (LHSI->isConvergent() && LHSI->getParent() != RHSI->getParent())
    return false;

  return LHSI->isIdenticalToWhenDefined(RHSI, /*IntersectAttrs=*/true);
}

//===----------------------------------------------------------------------===//
// GEPValue
//===----------------------------------------------------------------------===//

namespace {

struct GEPValue {
  Instruction *Inst;
  std::optional<int64_t> ConstantOffset;

  GEPValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  GEPValue(Instruction *I, std::optional<int64_t> ConstantOffset)
      : Inst(I), ConstantOffset(ConstantOffset) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    return isa<GetElementPtrInst>(Inst);
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<GEPValue> {
  static inline GEPValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline GEPValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const GEPValue &Val);
  static bool isEqual(const GEPValue &LHS, const GEPValue &RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<GEPValue>::getHashValue(const GEPValue &Val) {
  auto *GEP = cast<GetElementPtrInst>(Val.Inst);
  if (Val.ConstantOffset.has_value())
    return hash_combine(GEP->getOpcode(), GEP->getPointerOperand(),
                        Val.ConstantOffset.value());
  return hash_combine(
      GEP->getOpcode(),
      hash_combine_range(GEP->value_op_begin(), GEP->value_op_end()));
}

bool DenseMapInfo<GEPValue>::isEqual(const GEPValue &LHS, const GEPValue &RHS) {
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHS.Inst == RHS.Inst;
  auto *LGEP = cast<GetElementPtrInst>(LHS.Inst);
  auto *RGEP = cast<GetElementPtrInst>(RHS.Inst);
  if (LGEP->getPointerOperand() != RGEP->getPointerOperand())
    return false;
  if (LHS.ConstantOffset.has_value() && RHS.ConstantOffset.has_value())
    return LHS.ConstantOffset.value() == RHS.ConstantOffset.value();
  return LGEP->isIdenticalToWhenDefined(RGEP);
}
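
// Illustrative example (example IR, not from this file): with a constant
// offset available, these two GEPs are the same key even though their source
// element types differ, because both resolve to offset 4 from %base:
//
//   %p1 = getelementptr i8, ptr %base, i64 4
//   %p2 = getelementptr i32, ptr %base, i32 1
//
// so the second GEP can be replaced by the first.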

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes.  It is intended to be fast and catch
/// obvious cases so that instcombine and other passes are more effective.  It
/// is expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
682 unsigned Generation = 0;
683 int MatchingId = -1;
684 bool IsAtomic = false;
685 bool IsLoad = false;
686
689 bool IsAtomic, bool IsLoad)
690 : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
691 IsAtomic(IsAtomic), IsLoad(IsLoad) {}
692 };

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  using GEPMapAllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<GEPValue, Value *>>;
  using GEPHTType = ScopedHashTable<GEPValue, Value *, DenseMapInfo<GEPValue>,
                                    GEPMapAllocatorTy>;
  GEPHTType AvailableGEPs;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;

  /// Set up the pass for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  unsigned ClobberCounter = 0;

  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on.  These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              GEPHTType &AvailableGEPs)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          InvariantScope(AvailableInvariants), CallScope(AvailableCalls),
          GEPScope(AvailableGEPs) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
    GEPHTType::ScopeTy GEPScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree.  This includes scopes for values, loads, and
  // calls as well as the generation.  There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              GEPHTType &AvailableGEPs, unsigned cg, DomTreeNode *n,
              DomTreeNode::const_iterator child,
              DomTreeNode::const_iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls, AvailableGEPs) {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() const { return CurrentGeneration; }
    unsigned childGeneration() const { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::const_iterator childIter() const { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::const_iterator end() const { return EndIter; }
    bool isProcessed() const { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::const_iterator ChildIter;
    DomTreeNode::const_iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };

  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
        IntrID = II->getIntrinsicID();
        if (TTI.getTgtMemIntrinsic(II, Info))
          return;
        if (isHandledNonTargetIntrinsic(IntrID)) {
          switch (IntrID) {
          case Intrinsic::masked_load:
            Info.PtrVal = Inst->getOperand(0);
            Info.MatchingId = Intrinsic::masked_load;
            Info.ReadMem = true;
            Info.WriteMem = false;
            Info.IsVolatile = false;
            break;
          case Intrinsic::masked_store:
            Info.PtrVal = Inst->getOperand(1);
            // Use the ID of masked load as the "matching id".  This will
            // prevent matching non-masked loads/stores with masked ones
            // (which could be done), but at the moment, the code here
            // does not support matching intrinsics with non-intrinsics,
            // so keep the MatchingIds specific to masked instructions
            // for now (TODO).
            Info.MatchingId = Intrinsic::masked_load;
            Info.ReadMem = false;
            Info.WriteMem = true;
            Info.IsVolatile = false;
            break;
          }
        }
      }
    }

    Instruction *get() { return Inst; }
    const Instruction *get() const { return Inst; }

    bool isLoad() const {
      if (IntrID != 0)
        return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IntrID != 0)
        return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IntrID != 0)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IntrID != 0)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }

      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IntrID != 0)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }

      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      return false;
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1.  For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure.  That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IntrID != 0)
        return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IntrID != 0)
        return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    Type *getValueType() const {
      // TODO: handle target-specific intrinsics.
      return Inst->getAccessType();
    }

    bool mayReadFromMemory() const {
      if (IntrID != 0)
        return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IntrID != 0)
        return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    Intrinsic::ID IntrID = 0;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  // This function is to prevent accidentally passing a non-target
  // intrinsic ID to TargetTransformInfo.
  static bool isHandledNonTargetIntrinsic(Intrinsic::ID ID) {
    switch (ID) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
      return true;
    }
    return false;
  }
  static bool isHandledNonTargetIntrinsic(const Value *V) {
    if (auto *II = dyn_cast<IntrinsicInst>(V))
      return isHandledNonTargetIntrinsic(II->getIntrinsicID());
    return false;
  }

  bool processNode(DomTreeNode *Node);

  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                             const BasicBlock *BB, const BasicBlock *Pred);

  Value *getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
                          unsigned CurrentGeneration);

  bool overridingStores(const ParseMemoryInst &Earlier,
                        const ParseMemoryInst &Later);

  Value *getOrCreateResult(Instruction *Inst, Type *ExpectedType) const {
    // The load or the store's first operand.
    Value *V;
    if (auto *II = dyn_cast<IntrinsicInst>(Inst)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
        V = II;
        break;
      case Intrinsic::masked_store:
        V = II->getOperand(0);
        break;
      default:
        return TTI.getOrCreateResultFromMemIntrinsic(II, ExpectedType);
      }
    } else {
      V = isa<LoadInst>(Inst) ? Inst : cast<StoreInst>(Inst)->getValueOperand();
    }

    return V->getType() == ExpectedType ? V : nullptr;
  }
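
  // Illustrative use (example scenario, not from this file): when a store of
  // %v to %p dominates a load of the same type from %p, getOrCreateResult
  // returns %v (the store's value operand), so the load can be forwarded
  // without recreating the value.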

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  bool isNonTargetIntrinsicMatch(const IntrinsicInst *Earlier,
                                 const IntrinsicInst *Later) {
    auto IsSubmask = [](const Value *Mask0, const Value *Mask1) {
      // Is Mask0 a submask of Mask1?
      if (Mask0 == Mask1)
        return true;
      if (isa<UndefValue>(Mask0) || isa<UndefValue>(Mask1))
        return false;
      auto *Vec0 = dyn_cast<ConstantVector>(Mask0);
      auto *Vec1 = dyn_cast<ConstantVector>(Mask1);
      if (!Vec0 || !Vec1)
        return false;
      if (Vec0->getType() != Vec1->getType())
        return false;
      for (int i = 0, e = Vec0->getNumOperands(); i != e; ++i) {
        Constant *Elem0 = Vec0->getOperand(i);
        Constant *Elem1 = Vec1->getOperand(i);
        auto *Int0 = dyn_cast<ConstantInt>(Elem0);
        if (Int0 && Int0->isZero())
          continue;
        auto *Int1 = dyn_cast<ConstantInt>(Elem1);
        if (Int1 && !Int1->isZero())
          continue;
        if (isa<UndefValue>(Elem0) || isa<UndefValue>(Elem1))
          return false;
        if (Elem0 == Elem1)
          continue;
        return false;
      }
      return true;
    };
    auto PtrOp = [](const IntrinsicInst *II) {
      if (II->getIntrinsicID() == Intrinsic::masked_load)
        return II->getOperand(0);
      if (II->getIntrinsicID() == Intrinsic::masked_store)
        return II->getOperand(1);
      llvm_unreachable("Unexpected IntrinsicInst");
    };
    auto MaskOp = [](const IntrinsicInst *II) {
      if (II->getIntrinsicID() == Intrinsic::masked_load)
        return II->getOperand(2);
      if (II->getIntrinsicID() == Intrinsic::masked_store)
        return II->getOperand(3);
      llvm_unreachable("Unexpected IntrinsicInst");
    };
    auto ThruOp = [](const IntrinsicInst *II) {
      if (II->getIntrinsicID() == Intrinsic::masked_load)
        return II->getOperand(3);
      llvm_unreachable("Unexpected IntrinsicInst");
    };

    if (PtrOp(Earlier) != PtrOp(Later))
      return false;

    Intrinsic::ID IDE = Earlier->getIntrinsicID();
    Intrinsic::ID IDL = Later->getIntrinsicID();
    // We could really use specific intrinsic classes for masked loads
    // and stores in IntrinsicInst.h.
    if (IDE == Intrinsic::masked_load && IDL == Intrinsic::masked_load) {
      // Trying to replace later masked load with the earlier one.
      // Check that the pointers are the same, and
      // - masks and pass-throughs are the same, or
      // - replacee's pass-through is "undef" and replacee's mask is a
      //   submask of the replacer's mask.
      if (MaskOp(Earlier) == MaskOp(Later) && ThruOp(Earlier) == ThruOp(Later))
        return true;
      if (!isa<UndefValue>(ThruOp(Later)))
        return false;
      return IsSubmask(MaskOp(Later), MaskOp(Earlier));
    }
    if (IDE == Intrinsic::masked_store && IDL == Intrinsic::masked_load) {
      // Trying to replace a load of a stored value with the store's value.
      // Check that the pointers are the same, and
      // - load's mask is a submask of store's mask, and
      // - load's pass-through is "undef".
      if (!IsSubmask(MaskOp(Later), MaskOp(Earlier)))
        return false;
      return isa<UndefValue>(ThruOp(Later));
    }
    if (IDE == Intrinsic::masked_load && IDL == Intrinsic::masked_store) {
      // Trying to remove a store of the loaded value.
      // Check that the pointers are the same, and
      // - store's mask is a submask of the load's mask.
      return IsSubmask(MaskOp(Later), MaskOp(Earlier));
    }
    if (IDE == Intrinsic::masked_store && IDL == Intrinsic::masked_store) {
      // Trying to remove a dead store (earlier).
      // Check that the pointers are the same, and
      // - the to-be-removed store's mask is a submask of the other store's
      //   mask.
      return IsSubmask(MaskOp(Earlier), MaskOp(Later));
    }
    return false;
  }
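
  // Illustrative example (example IR, not from this file): a later masked
  // load with an undef pass-through and a mask that is a submask of an
  // earlier masked load over the same pointer can reuse the earlier result:
  //
  //   %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
  //             <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x i32> %t)
  //   %v1 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
  //             <4 x i1> <i1 true, i1 false, i1 true, i1 false>,
  //             <4 x i32> undef)
  //
  // Here IsSubmask(mask(%v1), mask(%v0)) holds, so %v1 can be replaced by %v0.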

  void removeMSSA(Instruction &Inst) {
    if (!MSSA)
      return;
    if (VerifyMemorySSA)
      MSSA->verifyMemorySSA();
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber.  The phi case
    // is handled by MemorySSA when passing OptimizePhis = true to
    // removeMemoryAccess.  The non-optimized MemoryUse case is lazily updated
    // by MemorySSA's getClobberingMemoryAccess.
    MSSAUpdater->removeMemoryAccess(&Inst, true);
  }
};

} // end anonymous namespace

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation invariant first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef;
  if (ClobberCounter < EarlyCSEMssaOptCap) {
    LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
    ClobberCounter++;
  } else
    LaterDef = LaterMA->getDefiningAccess();

  return MSSA->dominates(LaterDef, EarlierMA);
}

bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::getOrNone.
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}

bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode, Value *&LHS,
                       Value *&RHS) {
    if (Opcode == Instruction::And &&
        match(I, m_LogicalAnd(m_Value(LHS), m_Value(RHS))))
      return true;
    else if (Opcode == Instruction::Or &&
             match(I, m_LogicalOr(m_Value(LHS), m_Value(RHS))))
      return true;
    return false;
  };
  // If the condition is AND operation, we can propagate its operands into the
  // true branch.  If it is OR operation, we can propagate them into the false
  // branch.
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        NumCSECVP += Count;
        MadeChanges = true;
      }
    }

    Value *LHS, *RHS;
    if (MatchBinOp(Curr, PropagateOpcode, LHS, RHS))
      for (auto *Op : { LHS, RHS })
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}
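
// Illustrative example (example IR, not from this file): given
//
//   %c = icmp eq i32 %x, 0
//   br i1 %c, label %then, label %else
//
// every dominated use of %c inside %then is replaced with 'true' (and with
// 'false' inside %else); for a branch on '%a and %b' the same propagation
// applies recursively to %a and %b on the true edge.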

Value *EarlyCSE::getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
                                  unsigned CurrentGeneration) {
  if (InVal.DefInst == nullptr)
    return nullptr;
  if (InVal.MatchingId != MemInst.getMatchingId())
    return nullptr;
  // We don't yet handle removing loads with ordering of any kind.
  if (MemInst.isVolatile() || !MemInst.isUnordered())
    return nullptr;
  // We can't replace an atomic load with one which isn't also atomic.
  if (MemInst.isLoad() && !InVal.IsAtomic && MemInst.isAtomic())
    return nullptr;

  // The value V returned from this function is used differently depending
  // on whether MemInst is a load or a store.  If it's a load, we will replace
  // MemInst with V, if it's a store, we will check if V is the same as the
  // available value.
  bool MemInstMatching = !MemInst.isLoad();
  Instruction *Matching = MemInstMatching ? MemInst.get() : InVal.DefInst;
  Instruction *Other = MemInstMatching ? InVal.DefInst : MemInst.get();

  // For stores check the result values before checking memory generation
  // (otherwise isSameMemGeneration may crash).
  Value *Result = MemInst.isStore()
                      ? getOrCreateResult(Matching, Other->getType())
                      : nullptr;
  if (MemInst.isStore() && InVal.DefInst != Result)
    return nullptr;

  // Deal with non-target memory intrinsics.
  bool MatchingNTI = isHandledNonTargetIntrinsic(Matching);
  bool OtherNTI = isHandledNonTargetIntrinsic(Other);
  if (OtherNTI != MatchingNTI)
    return nullptr;
  if (OtherNTI && MatchingNTI) {
    if (!isNonTargetIntrinsicMatch(cast<IntrinsicInst>(InVal.DefInst),
                                   cast<IntrinsicInst>(MemInst.get())))
      return nullptr;
  }

  if (!isOperatingOnInvariantMemAt(MemInst.get(), InVal.Generation) &&
      !isSameMemGeneration(InVal.Generation, CurrentGeneration, InVal.DefInst,
                           MemInst.get()))
    return nullptr;

  if (!Result)
    Result = getOrCreateResult(Matching, Other->getType());
  return Result;
}

static void combineIRFlags(Instruction &From, Value *To) {
  if (auto *I = dyn_cast<Instruction>(To)) {
    // If I being poison triggers UB, there is no need to drop those
    // flags.  Otherwise, only retain flags present on both I and From.
    if (!programUndefinedIfPoison(I))
      I->andIRFlags(&From);
  }
  if (isa<CallBase>(&From) && isa<CallBase>(To)) {
    // NB: Intersection of attrs between From and To is overly conservative.
    // Since we only CSE readonly functions that have the same memory state,
    // we can preserve (or possibly in some cases combine) more attributes.
    // Likewise this implies when checking equality of callsites for CSEing,
    // we can probably ignore more attributes.  Generally poison-generating
    // attributes need to be handled with more care as they can create *new*
    // UB if preserved/combined and violated.  Attributes that are non-poison
    // generating can be preserved unchecked.
    bool Success =
        cast<CallBase>(To)->tryIntersectAttributes(cast<CallBase>(&From));
    assert(Success && "Failed to intersect attributes in callsites that "
                      "passed identical check");
    // For release builds where the assert is compiled out.
    (void)Success;
  }
}

bool EarlyCSE::overridingStores(const ParseMemoryInst &Earlier,
                                const ParseMemoryInst &Later) {
  // Can we remove Earlier store because of Later store?

  assert(Earlier.isUnordered() && !Earlier.isVolatile() &&
         "Violated invariant");
  if (Earlier.getPointerOperand() != Later.getPointerOperand())
    return false;
  if (!Earlier.getValueType() || !Later.getValueType() ||
      Earlier.getValueType() != Later.getValueType())
    return false;
  if (Earlier.getMatchingId() != Later.getMatchingId())
    return false;
  // At the moment, we don't remove ordered stores, but do remove
  // unordered atomic stores.  There's no special requirement (for
  // unordered atomics) about removing atomic stores only in favor of
  // other atomic stores since we were going to execute the non-atomic
  // one anyway and the atomic one might never have become visible.
  if (!Earlier.isUnordered() || !Later.isUnordered())
    return false;

  // Deal with non-target memory intrinsics.
  bool ENTI = isHandledNonTargetIntrinsic(Earlier.get());
  bool LNTI = isHandledNonTargetIntrinsic(Later.get());
  if (ENTI && LNTI)
    return isNonTargetIntrinsicMatch(cast<IntrinsicInst>(Earlier.get()),
                                     cast<IntrinsicInst>(Later.get()));

  // Because of the check above, at least one of them is false.
  // For now disallow matching intrinsics with non-intrinsics,
  // so assume that the stores match if neither is an intrinsic.
  return ENTI == LNTI;
}
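
// Illustrative example (example IR, not from this file): in
//
//   store i32 1, ptr %p
//   store i32 2, ptr %p
//
// the first store is dead: same pointer, same value type, and no intervening
// read, so overridingStores returns true and processNode deletes it.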

bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still
  // current in this block.  If this block has multiple predecessors, then
  // they could have invalidated the live-out memory values of our parent
  // value.  For now, just be conservative and invalidate memory if this block
  // has multiple predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path.  We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value.  Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst))
        Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
    }
  }

  /// LastStore - Keep track of the last unordered store we saw for DSE
  /// purposes.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated.  If so, do it.  If
  // not, add them to AvailableValues.
  for (Instruction &Inst : make_early_inc_range(*BB)) {
    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(&Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }

      salvageKnowledge(&Inst, &AC);
      salvageDebugInfo(Inst);
      removeMSSA(Inst);
      Inst.eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with its removal.  However, we should mark
    // its condition as true for all dominated blocks.
    if (auto *Assume = dyn_cast<AssumeInst>(&Inst)) {
      auto *CondI = dyn_cast<Instruction>(Assume->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << Inst << '\n');
      continue;
    }

    // Likewise, noalias intrinsics don't actually write.
    if (match(&Inst,
              m_Intrinsic<Intrinsic::experimental_noalias_scope_decl>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping noalias intrinsic: " << Inst
                        << '\n');
      continue;
    }

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(&Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << Inst << '\n');
      continue;
    }

    // Skip pseudoprobe intrinsics, for the same reason as assume intrinsics.
    if (match(&Inst, m_Intrinsic<Intrinsic::pseudoprobe>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping pseudoprobe: " << Inst << '\n');
      continue;
    }

    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across it.  For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations.  Also, we dont need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last
    // store, if there was a store following invariant.start.  Consider:
    //
    //   store 30, i8* p
    //   invariant.start(p)
    //   store 40, i8* p
    //
    // We can DSE the store to 30, since the store 40 to invariant location p
    // causes undefined behaviour.
    if (match(&Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst.use_empty())
        continue;
      MemoryLocation MemLoc =
          MemoryLocation::getForArgument(&cast<CallInst>(Inst), 1, TLI);
      // Don't start a scope if we already have a better one pushed.
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }

    if (isGuard(&Inst)) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst).getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << Inst << '\n');
              salvageKnowledge(&Inst, &AC);
              removeMSSA(Inst);
              Inst.eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst).setArgOperand(0, KnownCond);
          }
          // The condition we're on guarding here is true for all dominated
          // locations.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store (to
      // avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = simplifyInstruction(&Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << Inst << "  to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst.use_empty()) {
          Inst.replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(&Inst, &TLI)) {
          salvageKnowledge(&Inst, &AC);
          removeMSSA(Inst);
          Inst.eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(&Inst)) {
      if ([[maybe_unused]] auto *CI = dyn_cast<ConstrainedFPIntrinsic>(&Inst)) {
        assert((!CI->getExceptionBehavior() ||
                CI->getExceptionBehavior() != fp::ebStrict) &&
               "Unexpected ebStrict from SimpleValue::canHandle()");
        assert((!CI->getRoundingMode() ||
                CI->getRoundingMode() != RoundingMode::Dynamic) &&
               "Unexpected dynamic rounding from SimpleValue::canHandle()");
      }
      // See if the instruction has an available value.  If so, use it.
      if (Value *V = AvailableValues.lookup(&Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << Inst << "  to: " << *V
                          << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        combineIRFlags(Inst, V);
        Inst.replaceAllUsesWith(V);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(&Inst, &Inst);
      continue;
    }

    ParseMemoryInst MemInst(&Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment.  If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(&Inst);
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (Value *Op = getMatchingValue(InVal, MemInst, CurrentGeneration)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << Inst
                          << "  to: " << *InVal.DefInst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (InVal.IsLoad)
          if (auto *I = dyn_cast<Instruction>(Op))
            combineMetadataForCSE(I, &Inst, false);
        if (!Inst.use_empty())
          Inst.replaceAllUsesWith(Op);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSELoad;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(MemInst.getPointerOperand(),
                            LoadValue(&Inst, CurrentGeneration,
                                      MemInst.getMatchingId(),
                                      MemInst.isAtomic(),
                                      MemInst.isLoad()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore.  Load/store
    // intrinsics will indicate both a read and a write to memory.  The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst.mayReadFromMemory() || Inst.mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(&Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(&Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              &Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << Inst
                          << "  to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        combineIRFlags(Inst, InVal.first);
        if (!Inst.use_empty())
          Inst.replaceAllUsesWith(InVal.first);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(&Inst, std::make_pair(&Inst, CurrentGeneration));
      continue;
    }

    // Compare GEP instructions based on offset.
    if (GEPValue::canHandle(&Inst)) {
      auto *GEP = cast<GetElementPtrInst>(&Inst);
      APInt Offset = APInt(SQ.DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEPValue GEPVal(GEP, GEP->accumulateConstantOffset(SQ.DL, Offset)
                               ? Offset.trySExtValue()
                               : std::nullopt);
      if (Value *V = AvailableGEPs.lookup(GEPVal)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE GEP: " << Inst << "  to: " << *V
                          << '\n');
        combineIRFlags(Inst, V);
        Inst.replaceAllUsesWith(V);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSEGEP;
        continue;
      }

      // Otherwise, just remember that we have this GEP.
      AvailableGEPs.insert(GEPVal, &Inst);
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence.  As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation.  We do need to prevent DSE across the fence,
    // but that's handled above.
    if (auto *FI = dyn_cast<FenceInst>(&Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst.mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // write back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write.  The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getMatchingValue(InVal, MemInst, CurrentGeneration)) {
        // It is okay to have a LastStore to a different pointer here if
        // MemorySSA tells us that the load and store are from the same memory
        // generation.  In that case, LastStore should keep its present value
        // since we're removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all.  Check to see if it is
    // something that could modify memory.  If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst.mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads.  Delete the earlier store.
        if (LastStore) {
          if (overridingStores(ParseMemoryInst(LastStore, TTI), MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << "  due to: " << Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              salvageKnowledge(&Inst, &AC);
              removeMSSA(*LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values.  Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer.  It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(MemInst.getPointerOperand(),
                              LoadValue(&Inst, CurrentGeneration,
                                        MemInst.getMatchingId(),
                                        MemInst.isAtomic(),
                                        MemInst.isLoad()));

        // Remember that this was the last unordered store we saw for DSE.  We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed.  We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform.  Another option would
        // be to merge the ordering with that of the post dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = &Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there is significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns.  For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      AvailableGEPs, CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack.  Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize and process the node.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      CurrentGeneration = NodeToProcess->currentGeneration();
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(new StackNode(
          AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
          AvailableGEPs, NodeToProcess->childGeneration(), child,
          child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  return Changed;
}
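
// Illustrative consequence of the scoped traversal above (example IR, not
// from this file): a value made available inside one dominator-tree child is
// popped (via NodeScope) before a sibling block is processed, so
//
//   if.then:  %a1 = add i32 %x, %y
//   if.else:  %a2 = add i32 %x, %y
//
// are NOT CSE'd with each other: neither block dominates the other.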
1838
1845 auto *MSSA =
1847
1848 EarlyCSE CSE(F.getDataLayout(), TLI, TTI, DT, AC, MSSA);
1849
1850 if (.run())
1852
1857 return PA;
1858}
1859
1863 OS, MapClassName2PassName);
1864 OS << '<';
1866 OS << "memssa";
1867 OS << '>';
1868}

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes.  It is intended to be fast and catch
/// obvious cases so that instcombine and other passes are more effective.  It
/// is expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)