LLVM: lib/Transforms/Scalar/EarlyCSE.cpp Source File
//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//
#include <cassert>
#include <deque>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;
#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumCSEGEP, "Number of GEP instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");

static cl::opt<unsigned> EarlyCSEMssaOptCap(
    "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
    cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

static cl::opt<bool> EarlyCSEDebugHash(
    "earlycse-debug-hash", cl::init(false), cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    // Also handled are constrained intrinsics that look like the types
    // of instruction handled below (UnaryOperator, etc.).
    if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
      if (Function *F = CI->getCalledFunction()) {
        switch ((Intrinsic::ID)F->getIntrinsicID()) {
        case Intrinsic::experimental_constrained_fadd:
        case Intrinsic::experimental_constrained_fsub:
        case Intrinsic::experimental_constrained_fmul:
        case Intrinsic::experimental_constrained_fdiv:
        case Intrinsic::experimental_constrained_frem:
        case Intrinsic::experimental_constrained_fptosi:
        case Intrinsic::experimental_constrained_sitofp:
        case Intrinsic::experimental_constrained_fptoui:
        case Intrinsic::experimental_constrained_uitofp:
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps: {
          auto *CFP = cast<ConstrainedFPIntrinsic>(CI);
          if (CFP->getExceptionBehavior() &&
              CFP->getExceptionBehavior() == fp::ebStrict)
            return false;

          // Since we CSE across function calls we must not allow
          // the rounding mode to change.
          if (CFP->getRoundingMode() &&
              CFP->getRoundingMode() == RoundingMode::Dynamic)
            return false;
          return true;
        }
        }
      }
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy() &&
             // FIXME: Currently the calls which may access the thread id may
             // be considered as not accessing the memory. But this is
             // problematic for coroutines, since coroutines may resume in a
             // different thread. So we disable the optimization here for the
             // correctness. However, it may block many other correct
             // optimizations. Revert this one when we detect the memory
             // accessing kind more precisely.
             !CI->getFunction()->isPresplitCoroutine();
    }
    return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) ||
           isa<BinaryOperator>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst) ||
           isa<FreezeInst>(Inst);
  }
};

} // end anonymous namespace

template <> struct llvm::DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

/// Match a 'select' including an optional 'not's of the condition.
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
                                           Value *&B,
                                           SelectPatternFlavor &Flavor) {
  // Return false if V is not even a select.
  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
    return false;

  // Look through a 'not' of the condition operand by swapping A/B.
  Value *CondNot;
  if (match(Cond, m_Not(m_Value(CondNot)))) {
    Cond = CondNot;
    std::swap(A, B);
  }

  // Match canonical forms of min/max. We are not using ValueTracking's
  // more powerful matchSelectPattern() because it may rely on instruction
  // flags such as "nsw". That would be incompatible with the current hashing
  // mechanism that may remove flags to increase the likelihood of CSE.

  Flavor = SPF_UNKNOWN;
  CmpPredicate Pred;

  if (!match(Cond, m_ICmp(Pred, m_Specific(A), m_Specific(B)))) {
    // Check for commuted variants of min/max by swapping the compare
    // operands. If neither form matches, this is still a valid select.
    if (!match(Cond, m_ICmp(Pred, m_Specific(B), m_Specific(A))))
      return true;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  switch (Pred) {
  case CmpInst::ICMP_UGT: Flavor = SPF_UMAX; break;
  case CmpInst::ICMP_ULT: Flavor = SPF_UMIN; break;
  case CmpInst::ICMP_SGT: Flavor = SPF_SMAX; break;
  case CmpInst::ICMP_SLT: Flavor = SPF_SMIN; break;
  // Non-strict inequalities.
  case CmpInst::ICMP_ULE: Flavor = SPF_UMIN; break;
  case CmpInst::ICMP_UGE: Flavor = SPF_UMAX; break;
  case CmpInst::ICMP_SLE: Flavor = SPF_SMIN; break;
  case CmpInst::ICMP_SGE: Flavor = SPF_SMAX; break;
  default: break;
  }

  return true;
}
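
// Illustrative sketch (not part of the original source): the matcher treats
//   %c = icmp sgt i32 %a, %b
//   %m = select i1 %c, i32 %a, i32 %b    ; smax(%a, %b)
// and the commuted-compare form
//   %c2 = icmp slt i32 %b, %a
//   %m2 = select i1 %c2, i32 %a, i32 %b  ; also smax(%a, %b)
// as the same SPF_SMAX flavor, so both hash and compare as equal candidates.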

static unsigned hashCallInst(CallInst *CI) {
  // Don't CSE convergent calls in different basic blocks, because they
  // implicitly depend on the set of threads that is currently executing.
  if (CI->isConvergent()) {
    return hash_combine(CI->getOpcode(), CI->getParent(),
                        hash_combine_range(CI->value_op_begin(),
                                           CI->value_op_end()));
  }
  return hash_combine(CI->getOpcode(),
                      hash_combine_range(CI->value_op_begin(),
                                         CI->value_op_end()));
}

static unsigned getHashValueImpl(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    // Compares can be commuted by swapping the comparands and
    // updating the predicate.  Choose the form that has the
    // comparands in sorted order, or in the case of a tie, the
    // one with the lower predicate.
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    CmpInst::Predicate SwappedPred = CI->getSwappedPredicate();
    if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
      std::swap(LHS, RHS);
      Pred = SwappedPred;
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash general selects to allow matching commuted true/false operands.
  SelectPatternFlavor SPF;
  Value *Cond, *A, *B;
  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {

    // Hash min/max (cmp + select) to allow for commuted operands.
    // Min/max may also have non-canonical compare predicate (eg, the compare
    // for smin may use 'sgt' rather than 'slt'), and non-canonical operands
    // in the compare.
    // TODO: We should also detect FP min/max.
    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
        SPF == SPF_UMIN || SPF == SPF_UMAX) {
      if (A > B)
        std::swap(A, B);
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }

    // If we do not have a compare as the condition, just hash in the
    // condition.
    CmpPredicate Pred;
    Value *X, *Y;
    if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
      return hash_combine(Inst->getOpcode(), Cond, A, B);

    // Similar to cmp normalization (above) - canonicalize the predicate:
    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
    if (CmpInst::getInversePredicate(Pred) < Pred) {
      Pred = CmpInst::getInversePredicate(Pred);
      std::swap(A, B);
    }
    return hash_combine(Inst->getOpcode(),
                        static_cast<CmpInst::Predicate>(Pred), X, Y, A, B);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (FreezeInst *FI = dyn_cast<FreezeInst>(Inst))
    return hash_combine(FI->getOpcode(), FI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<ExtractElementInst>(Inst) ||
          isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
          isa<UnaryOperator>(Inst) || isa<FreezeInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Handle intrinsics with commutative operands.
  auto *II = dyn_cast<IntrinsicInst>(Inst);
  if (II && II->isCommutative() && II->arg_size() >= 2) {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    if (LHS > RHS)
      std::swap(LHS, RHS);
    return hash_combine(
        II->getOpcode(), LHS, RHS,
        hash_combine_range(II->value_op_begin() + 2, II->value_op_end()));
  }

  // gc.relocate is 'special' call: its second and third operands are
  // not real values, but indices into statepoint's argument list.
  // Get values they point to.
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(Inst))
    return hash_combine(GCR->getOpcode(), GCR->getOperand(0),
                        GCR->getBasePtr(), GCR->getDerivedPtr());

  // Don't CSE convergent calls in different basic blocks, because they
  // implicitly depend on the set of threads that is currently executing.
  if (CallInst *CI = dyn_cast<CallInst>(Inst))
    return hashCallInst(CI);

  // Mix in the opcode.
  return hash_combine(Inst->getOpcode(),
                      hash_combine_range(Inst->value_op_begin(),
                                         Inst->value_op_end()));
}
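
// Illustrative note (not from the original source): because commutative
// binary operators sort their operands by pointer value before hashing,
// 'add i32 %a, %b' and 'add i32 %b, %a' produce the same hash, and
// isEqualImpl below confirms the match by comparing swapped operands.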

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
#ifndef NDEBUG
  // If -earlycse-debug-hash was specified, return a constant -- this
  // will force all hashing to collide, so we'll exhaustively search
  // the table for a match, and the assertion in isEqual will fire if
  // there's a bug causing equal keys to hash differently.
  if (EarlyCSEDebugHash)
    return 0;
#endif
  return getHashValueImpl(Val);
}

static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI, /*IntersectAttrs=*/true)) {
    // Convergent calls implicitly depend on the set of threads that is
    // currently executing, so conservatively return false if they are in
    // different basic blocks.
    if (CallInst *CI = dyn_cast<CallInst>(LHSI);
        CI && CI->isConvergent() && LHSI->getParent() != RHSI->getParent())
      return false;

    return true;
  }

  // If we're not strictly identical, we still might be a commutable
  // instruction.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality.
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality with swapped predicate.
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  auto *LII = dyn_cast<IntrinsicInst>(LHSI);
  auto *RII = dyn_cast<IntrinsicInst>(RHSI);
  if (LII && RII && LII->getIntrinsicID() == RII->getIntrinsicID() &&
      LII->isCommutative() && LII->arg_size() >= 2) {
    return LII->getArgOperand(0) == RII->getArgOperand(1) &&
           LII->getArgOperand(1) == RII->getArgOperand(0) &&
           std::equal(LII->arg_begin() + 2, LII->arg_end(),
                      RII->arg_begin() + 2, RII->arg_end()) &&
           LII->hasSameSpecialState(RII, /*IgnoreAlignment=*/false,
                                    /*IntersectAttrs=*/true);
  }

  // See comment above in `getHashValueImpl()`.
  if (const GCRelocateInst *GCR1 = dyn_cast<GCRelocateInst>(LHSI))
    if (const GCRelocateInst *GCR2 = dyn_cast<GCRelocateInst>(RHSI))
      return GCR1->getOperand(0) == GCR2->getOperand(0) &&
             GCR1->getBasePtr() == GCR2->getBasePtr() &&
             GCR1->getDerivedPtr() == GCR2->getDerivedPtr();

  // Min/max can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands in the compare.
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
  SelectPatternFlavor LSPF, RSPF;
  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
    if (LSPF == RSPF) {
      // TODO: We should also detect FP min/max.
      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
        return ((LHSA == RHSA && LHSB == RHSB) ||
                (LHSA == RHSB && LHSB == RHSA));

      // select Cond, A, B <--> select Cond, A, B
      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
        return true;
    }

    // If the true/false operands are swapped and the conditions are compares
    // with inverted predicates, the selects are equal:
    //
    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
    //
    // This also handles a 'not' of the condition, because the 'not' was
    // already looked through in matchSelectWithOptionalNotCond() and the
    // true/false operands were swapped accordingly, so the two forms reduce
    // to the same comparison here.
    if (LHSA == RHSB && LHSB == RHSA) {
      CmpPredicate PredL, PredR;
      Value *X, *Y;
      if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
          match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
          CmpInst::getInversePredicate(PredL) == PredR)
        return true;
    }
  }

  return false;
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  bool Result = isEqualImpl(LHS, RHS);
  // Check that the hash value is consistent with the equality predicate:
  // equal keys must produce equal hashes.
  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
         getHashValueImpl(LHS) == getHashValueImpl(RHS));
  return Result;
}
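
// Illustrative note (not from the original source): running with
// -earlycse-debug-hash forces getHashValue to return 0, so every lookup
// probes the table exhaustively and the assert above cross-checks that
// isEqualImpl never reports two keys equal that would hash differently.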

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory() ||
        // FIXME: Currently the calls which may access the thread id may
        // be considered as not accessing the memory. But this is
        // problematic for coroutines, since coroutines may resume in a
        // different thread. So we disable the optimization here for the
        // correctness. However, it may block many other correct
        // optimizations.
        CI->getFunction()->isPresplitCoroutine())
      return false;
    return true;
  }
};

} // end anonymous namespace

template <> struct llvm::DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hashCallInst(cast<CallInst>(Inst));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHS.Inst == RHS.Inst;

  CallInst *LHSI = cast<CallInst>(LHS.Inst);
  CallInst *RHSI = cast<CallInst>(RHS.Inst);

  // Convergent calls implicitly depend on the set of threads that is
  // currently executing, so conservatively return false if they are in
  // different basic blocks.
  if (LHSI->isConvergent() && LHSI->getParent() != RHSI->getParent())
    return false;

  return LHSI->isIdenticalToWhenDefined(RHSI, /*IntersectAttrs=*/true);
}

//===----------------------------------------------------------------------===//
// GEPValue
//===----------------------------------------------------------------------===//

namespace {

struct GEPValue {
  Instruction *Inst;
  std::optional<int64_t> ConstantOffset;

  GEPValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  GEPValue(Instruction *I, std::optional<int64_t> ConstantOffset)
      : Inst(I), ConstantOffset(ConstantOffset) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    return isa<GetElementPtrInst>(Inst);
  }
};

} // end anonymous namespace

template <> struct llvm::DenseMapInfo<GEPValue> {
  static inline GEPValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline GEPValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const GEPValue &Val);
  static bool isEqual(const GEPValue &LHS, const GEPValue &RHS);
};

unsigned DenseMapInfo<GEPValue>::getHashValue(const GEPValue &Val) {
  auto *GEP = cast<GetElementPtrInst>(Val.Inst);
  if (Val.ConstantOffset.has_value())
    return hash_combine(GEP->getOpcode(), GEP->getPointerOperand(),
                        Val.ConstantOffset.value());
  return hash_combine(
      GEP->getOpcode(),
      hash_combine_range(GEP->value_op_begin(), GEP->value_op_end()));
}

bool DenseMapInfo<GEPValue>::isEqual(const GEPValue &LHS,
                                     const GEPValue &RHS) {
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHS.Inst == RHS.Inst;
  auto *LGEP = cast<GetElementPtrInst>(LHS.Inst);
  auto *RGEP = cast<GetElementPtrInst>(RHS.Inst);
  if (LGEP->getPointerOperand() != RGEP->getPointerOperand())
    return false;
  if (LHS.ConstantOffset.has_value() && RHS.ConstantOffset.has_value())
    return LHS.ConstantOffset.value() == RHS.ConstantOffset.value();
  return LGEP->isIdenticalToWhenDefined(RGEP);
}
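
// Illustrative sketch (not from the original source): with the constant
// offset folded in, these two GEPs hash and compare equal even though their
// index expressions differ syntactically:
//   %p1 = getelementptr i32, ptr %base, i64 4   ; byte offset 16
//   %p2 = getelementptr i8,  ptr %base, i64 16  ; byte offset 16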

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch
/// obvious cases so that instcombine and other passes are more effective. It
/// is expected that a later pass of GVN will catch the interesting/hard
/// cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores
  /// when we have a fully redundant load.  In addition to the most recent
  /// load, we keep track of a generation count of the read, which is compared
  /// against the current generation count.  The current generation count is
  /// incremented after every possibly writing memory operation, which ensures
  /// that we only CSE loads with other loads that have no intervening store.
  /// Ordering events (such as fences or atomic instructions) increment the
  /// generation count as well; essentially, we model these as writes to all
  /// possible locations.  Note that atomic and/or volatile loads and stores
  /// can be present in the table; it is the responsibility of the consumer to
  /// inspect the atomicity/volatility if needed.
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;
    bool IsLoad = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic, bool IsLoad)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic), IsLoad(IsLoad) {}
  };
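
  // Illustrative sketch (not from the original source): given
  //   %v1 = load i32, ptr %p        ; recorded at generation G
  //   call void @f()                ; may write: generation becomes G+1
  //   %v2 = load i32, ptr %p        ; current generation G+1
  // (with hypothetical @f), the generation test alone rejects reusing %v1 for
  // %v2; with MemorySSA, isSameMemGeneration() below may still prove that the
  // call does not clobber %p.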

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation counts at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  using GEPMapAllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<GEPValue, Value *>>;
  using GEPHTType = ScopedHashTable<GEPValue, Value *, DenseMapInfo<GEPValue>,
                                    GEPMapAllocatorTy>;
  GEPHTType AvailableGEPs;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;

  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  unsigned ClobberCounter = 0;

  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on.  These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              GEPHTType &AvailableGEPs)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          InvariantScope(AvailableInvariants), CallScope(AvailableCalls),
          GEPScope(AvailableGEPs) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
    GEPHTType::ScopeTy GEPScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // nodes used during the stack walk.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              GEPHTType &AvailableGEPs, unsigned cg, DomTreeNode *n,
              DomTreeNode::const_iterator child,
              DomTreeNode::const_iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls, AvailableGEPs) {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() const { return CurrentGeneration; }
    unsigned childGeneration() const { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::const_iterator childIter() const { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::const_iterator end() const { return EndIter; }
    bool isProcessed() const { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::const_iterator ChildIter;
    DomTreeNode::const_iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };

  /// Wrapper class to handle memory instructions, including loads and stores
  /// as well as intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
        IntrID = II->getIntrinsicID();
        if (TTI.getTgtMemIntrinsic(II, Info))
          return;
        if (isHandledNonTargetIntrinsic(IntrID)) {
          switch (IntrID) {
          case Intrinsic::masked_load:
            Info.PtrVal = Inst->getOperand(0);
            Info.MatchingId = Intrinsic::masked_load;
            Info.ReadMem = true;
            Info.WriteMem = false;
            Info.IsVolatile = false;
            break;
          case Intrinsic::masked_store:
            Info.PtrVal = Inst->getOperand(1);
            // Use the ID of masked load as the "matching id". This will
            // prevent matching non-masked loads/stores with masked ones
            // (which could be done), but at the moment, the code here
            // does not support matching intrinsics with non-intrinsics,
            // so keep the MatchingIds specific to masked instructions
            // for now (TODO).
            Info.MatchingId = Intrinsic::masked_load;
            Info.ReadMem = false;
            Info.WriteMem = true;
            Info.IsVolatile = false;
            break;
          }
        }
      }
    }

    Instruction *get() { return Inst; }
    const Instruction *get() const { return Inst; }

    bool isLoad() const {
      if (IntrID != 0)
        return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IntrID != 0)
        return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IntrID != 0)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IntrID != 0)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }

      // Conservative answer.
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IntrID != 0)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }

      // Conservative answer.
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      return false;
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure.  That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IntrID != 0)
        return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IntrID != 0)
        return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    Type *getValueType() const {
      // TODO: handle target-specific intrinsics.
      return Inst->getAccessType();
    }

    bool mayReadFromMemory() const {
      if (IntrID != 0)
        return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IntrID != 0)
        return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    Intrinsic::ID IntrID = 0;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  // This function is to prevent accidentally passing a non-target
  // intrinsic ID to TargetTransformInfo.
  static bool isHandledNonTargetIntrinsic(Intrinsic::ID ID) {
    switch (ID) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
      return true;
    }
    return false;
  }
  static bool isHandledNonTargetIntrinsic(const Value *V) {
    if (auto *II = dyn_cast<IntrinsicInst>(V))
      return isHandledNonTargetIntrinsic(II->getIntrinsicID());
    return false;
  }

  bool processNode(DomTreeNode *Node);

  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                             const BasicBlock *BB, const BasicBlock *Pred);

  Value *getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
                          unsigned CurrentGeneration);

  bool overridingStores(const ParseMemoryInst &Earlier,
                        const ParseMemoryInst &Later);

  Value *getOrCreateResult(Instruction *Inst, Type *ExpectedType,
                           bool CanCreate) const {
    // The load or the store's first operand.
    Value *V;
    if (auto *II = dyn_cast<IntrinsicInst>(Inst)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
        V = II;
        break;
      case Intrinsic::masked_store:
        V = II->getOperand(0);
        break;
      default:
        return TTI.getOrCreateResultFromMemIntrinsic(II, ExpectedType,
                                                     CanCreate);
      }
    } else {
      V = isa<LoadInst>(Inst) ? Inst
                              : cast<StoreInst>(Inst)->getValueOperand();
    }

    return V->getType() == ExpectedType ? V : nullptr;
  }

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  bool isNonTargetIntrinsicMatch(const IntrinsicInst *Earlier,
                                 const IntrinsicInst *Later) {
    auto IsSubmask = [](const Value *Mask0, const Value *Mask1) {
      // Is Mask0 a submask of Mask1?
      if (Mask0 == Mask1)
        return true;
      if (isa<UndefValue>(Mask0) || isa<UndefValue>(Mask1))
        return false;
      auto *Vec0 = dyn_cast<ConstantVector>(Mask0);
      auto *Vec1 = dyn_cast<ConstantVector>(Mask1);
      if (!Vec0 || !Vec1)
        return false;
      if (Vec0->getType() != Vec1->getType())
        return false;
      for (int i = 0, e = Vec0->getNumOperands(); i != e; ++i) {
        Constant *Elem0 = Vec0->getOperand(i);
        Constant *Elem1 = Vec1->getOperand(i);
        auto *Int0 = dyn_cast<ConstantInt>(Elem0);
        if (Int0 && Int0->isZero())
          continue;
        auto *Int1 = dyn_cast<ConstantInt>(Elem1);
        if (Int1 && !Int1->isZero())
          continue;
        if (isa<UndefValue>(Elem0) || isa<UndefValue>(Elem1))
          return false;
        if (Elem0 == Elem1)
          continue;
        return false;
      }
      return true;
    };
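    // Illustrative note (not from the original source): by the definition
    // above, <i1 0, i1 1, i1 0, i1 1> is a submask of <i1 1, i1 1, i1 0, i1 1>:
    // every lane enabled in the first mask is also enabled in the second.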
    auto PtrOp = [](const IntrinsicInst *II) {
      if (II->getIntrinsicID() == Intrinsic::masked_load)
        return II->getOperand(0);
      if (II->getIntrinsicID() == Intrinsic::masked_store)
        return II->getOperand(1);
      llvm_unreachable("Unexpected IntrinsicInst");
    };
    auto MaskOp = [](const IntrinsicInst *II) {
      if (II->getIntrinsicID() == Intrinsic::masked_load)
        return II->getOperand(1);
      if (II->getIntrinsicID() == Intrinsic::masked_store)
        return II->getOperand(2);
      llvm_unreachable("Unexpected IntrinsicInst");
    };
    auto ThruOp = [](const IntrinsicInst *II) {
      if (II->getIntrinsicID() == Intrinsic::masked_load)
        return II->getOperand(2);
      llvm_unreachable("Unexpected IntrinsicInst");
    };

    if (PtrOp(Earlier) != PtrOp(Later))
      return false;

    Intrinsic::ID IDE = Earlier->getIntrinsicID();
    Intrinsic::ID IDL = Later->getIntrinsicID();
    // We could really use specific intrinsic classes for masked loads
    // and stores in IntrinsicInst.h.
    if (IDE == Intrinsic::masked_load && IDL == Intrinsic::masked_load) {
      // Trying to replace later masked load with the earlier one.
      // Check that the pointers are the same, and
      // - masks and pass-throughs are the same, or
      // - replacee's pass-through is "undef" and replacer's mask is a
      //   super-set of the replacee's mask.
      if (MaskOp(Earlier) == MaskOp(Later) && ThruOp(Earlier) == ThruOp(Later))
        return true;
      if (!isa<UndefValue>(ThruOp(Later)))
        return false;
      return IsSubmask(MaskOp(Later), MaskOp(Earlier));
    }
    if (IDE == Intrinsic::masked_store && IDL == Intrinsic::masked_load) {
      // Trying to replace a load of a stored value with the store's value.
      // Check that the pointers are the same, and
      // - load's mask is a subset of store's mask, and
      // - load's pass-through is "undef".
      if (!IsSubmask(MaskOp(Later), MaskOp(Earlier)))
        return false;
      return isa<UndefValue>(ThruOp(Later));
    }
    if (IDE == Intrinsic::masked_load && IDL == Intrinsic::masked_store) {
      // Trying to remove a store of the loaded value.
      // Check that the pointers are the same, and
      // - store's mask is a subset of the load's mask.
      return IsSubmask(MaskOp(Later), MaskOp(Earlier));
    }
    if (IDE == Intrinsic::masked_store && IDL == Intrinsic::masked_store) {
      // Trying to remove a dead store (earlier store) or a redundant store
      // (later store).
      // Check that the pointers are the same, and
      // - the to-be-removed store's mask is a subset of the other store's
      //   mask.
      return IsSubmask(MaskOp(Earlier), MaskOp(Later));
    }
    return false;
  }
1074
1075 void removeMSSA(Instruction &Inst) {
1076 if (!MSSA)
1077 return;
1079 MSSA->verifyMemorySSA();
1080
1081
1082
1083
1084
1085
1086 MSSAUpdater->removeMemoryAccess(&Inst, true);
1087 }
1088};
1089
1090}

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that
  // potentially clobbers LaterInst.
  MemoryAccess *LaterDef;
  if (ClobberCounter < EarlyCSEMssaOptCap) {
    LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
    ClobberCounter++;
  } else
    LaterDef = LaterMA->getDefiningAccess();

  return MSSA->dominates(LaterDef, EarlierMA);
}

bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never*
  // change within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get.  TODO
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}

bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode, Value *&LHS,
                       Value *&RHS) {
    if (Opcode == Instruction::And &&
        match(I, m_LogicalAnd(m_Value(LHS), m_Value(RHS))))
      return true;
    else if (Opcode == Instruction::Or &&
             match(I, m_LogicalOr(m_Value(LHS), m_Value(RHS))))
      return true;
    return false;
  };
  // If the condition is an AND operation, we can propagate its operands into
  // the true branch. If it is an OR operation, we can propagate them into the
  // false branch.
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        NumCSECVP += Count;
        MadeChanges = true;
      }
    }

    Value *LHS, *RHS;
    if (MatchBinOp(Curr, PropagateOpcode, LHS, RHS))
      for (auto *Op : { LHS, RHS })
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}
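
// Illustrative sketch (not from the original source): given
//   br i1 %c, label %then, label %else
// every use of %c dominated by the edge into %then can be rewritten to
// 'true'; and if %c = and i1 %a, %b, the worklist above also records %a and
// %b as known-true along that edge.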

Value *EarlyCSE::getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
                                  unsigned CurrentGeneration) {
  if (InVal.DefInst == nullptr)
    return nullptr;
  if (InVal.MatchingId != MemInst.getMatchingId())
    return nullptr;
  // We don't yet handle removing loads with ordering of any kind.
  if (MemInst.isVolatile() || !MemInst.isUnordered())
    return nullptr;
  // We can't replace an atomic load with one which isn't also atomic.
  if (MemInst.isLoad() && !InVal.IsAtomic && MemInst.isAtomic())
    return nullptr;

  // The value returned from this function is used differently depending on
  // whether MemInst is a load or a store. If it's a load, we will replace
  // MemInst with the returned value; if it's a store, we will check whether
  // the stored value equals the available value.
  bool MemInstMatching = !MemInst.isLoad();
  Instruction *Matching = MemInstMatching ? MemInst.get() : InVal.DefInst;
  Instruction *Other = MemInstMatching ? InVal.DefInst : MemInst.get();

  // For stores, check the result values before checking the memory
  // generation (otherwise isSameMemGeneration may crash).
  Value *Result =
      MemInst.isStore()
          ? getOrCreateResult(Matching, Other->getType(), false)
          : nullptr;
  if (MemInst.isStore() && InVal.DefInst != Result)
    return nullptr;

  // Deal with non-target memory intrinsics.
  bool MatchingNTI = isHandledNonTargetIntrinsic(Matching);
  bool OtherNTI = isHandledNonTargetIntrinsic(Other);
  if (OtherNTI != MatchingNTI)
    return nullptr;
  if (OtherNTI && MatchingNTI) {
    if (!isNonTargetIntrinsicMatch(cast<IntrinsicInst>(InVal.DefInst),
                                   cast<IntrinsicInst>(MemInst.get())))
      return nullptr;
  }

  if (!isOperatingOnInvariantMemAt(MemInst.get(), InVal.Generation) &&
      !isSameMemGeneration(InVal.Generation, CurrentGeneration, InVal.DefInst,
                           MemInst.get()))
    return nullptr;

  if (!Result)
    Result = getOrCreateResult(Matching, Other->getType(), true);
  return Result;
}
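
// Illustrative sketch (not from the original source): store-to-load
// forwarding through the AvailableLoads table,
//   store i32 %x, ptr %p
//   %v = load i32, ptr %p     ; getMatchingValue() returns %x
// succeeds only while the memory generation is unchanged (no intervening
// may-write instruction) or MemorySSA proves the generations compatible.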

static void combineIRFlags(Instruction &From, Value *To) {
  if (auto *I = dyn_cast<Instruction>(To)) {
    // If I being poison triggers UB, there is no need to drop those flags.
    // Otherwise, only retain flags present on both I and From.
    if (!programUndefinedIfPoison(I))
      I->andIRFlags(&From);
  }
  if (isa<CallBase>(&From) && isa<CallBase>(To)) {
    // When two call sites are found identical up to their attributes, the
    // surviving call must not claim anything stronger than both call sites
    // guarantee, so intersect the attribute sets.  The intersection is
    // expected to succeed because the calls already passed the identity
    // check with attribute intersection enabled.
    bool Success =
        cast<CallBase>(To)->tryIntersectAttributes(cast<CallBase>(&From));
    assert(Success && "Failed to intersect attributes in callsites that "
                      "passed identical check");
    // Avoid an unused-variable warning in builds without asserts.
    (void)Success;
  }
}
bool EarlyCSE::overridingStores(const ParseMemoryInst &Earlier,
                                const ParseMemoryInst &Later) {
  // Can we remove the Earlier store because of the Later store?

  assert(Earlier.isUnordered() && !Earlier.isVolatile() &&
         "Violated invariant");
  if (Earlier.getPointerOperand() != Later.getPointerOperand())
    return false;
  if (!Earlier.getValueType() || !Later.getValueType() ||
      Earlier.getValueType() != Later.getValueType())
    return false;
  if (Earlier.getMatchingId() != Later.getMatchingId())
    return false;
  // At the moment, we don't remove ordered stores, but do remove
  // unordered atomic stores.  There's no special requirement (for
  // unordered atomics) about removing atomic stores only in favor of
  // other atomic stores since we were going to execute the non-atomic
  // one anyway and the atomic one might never have become visible.
  if (!Earlier.isUnordered() || !Later.isUnordered())
    return false;

  // Deal with non-target memory intrinsics.
  bool ENTI = isHandledNonTargetIntrinsic(Earlier.get());
  bool LNTI = isHandledNonTargetIntrinsic(Later.get());
  if (ENTI && LNTI)
    return isNonTargetIntrinsicMatch(cast<IntrinsicInst>(Earlier.get()),
                                     cast<IntrinsicInst>(Later.get()));

  // Because of the check above, at least one of them is false.  For this
  // to return true, both need to be false, which means they are both
  // regular instructions.
  return ENTI == LNTI;
}
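
// Illustrative sketch (not from the original source): in
//   store i32 1, ptr %p       ; LastStore
//   store i32 2, ptr %p       ; current instruction
// with no intervening read, overridingStores() reports the first store as
// dead and processNode() erases it (counted in NumDSE).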

bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the
  // parent of the domtree node and all of the live out memory values are
  // still current in this block.  If this block has multiple predecessors,
  // then they could have invalidated the live-out memory values of our
  // parent value.  For now, just be conservative and invalidate memory if
  // this block has multiple predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path.  We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value.  Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst))
        Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
    }
  }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory.  If we see a store
  /// to the same location, we delete the dead store.  This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated.  If so, do it.
  // If not, add them to AvailableValues.
  for (Instruction &Inst : make_early_inc_range(*BB)) {
    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(&Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }

      salvageKnowledge(&Inst, &AC);
      salvageDebugInfo(Inst);
      removeMSSA(Inst);
      Inst.eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with its removal. However, we should mark
    // its condition as true for all dominated blocks.
    if (auto *Assume = dyn_cast<AssumeInst>(&Inst)) {
      auto *CondI = dyn_cast<Instruction>(Assume->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << Inst << '\n');
      continue;
    }

    // Likewise, noalias intrinsics don't actually write.
    if (match(&Inst,
              m_Intrinsic<Intrinsic::experimental_noalias_scope_decl>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping noalias intrinsic: " << Inst
                        << '\n');
      continue;
    }

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(&Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << Inst << '\n');
      continue;
    }

    // Skip pseudoprobe intrinsics, for the same reason as assume intrinsics.
    if (match(&Inst, m_Intrinsic<Intrinsic::pseudoprobe>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping pseudoprobe: " << Inst << '\n');
      continue;
    }

    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across them.  For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends
    // to start a scope in the current generation which is true for all future
    // generations.  Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last store,
    // if there was a store following invariant.start.  Consider:
    //
    //   store 30, i8* p
    //   invariant.start(p)
    //   store 40, i8* p
    //
    // We can DSE the store to 30, since the store of 40 to invariant location
    // p causes undefined behaviour.
    if (match(&Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst.use_empty())
        continue;
      MemoryLocation MemLoc =
          MemoryLocation::getForArgument(&cast<CallInst>(Inst), 1, TLI);
      // Don't start a scope if we already have a better one pushed.
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }

    if (isGuard(&Inst)) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst).getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << Inst << '\n');
              salvageKnowledge(&Inst, &AC);
              removeMSSA(Inst);
              Inst.eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst).setArgOperand(0, KnownCond);
          }
          // The condition we're on guarding here is true for all dominated
          // locations.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store
      // (to avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it
    // with its simpler value.
    if (Value *V = simplifyInstruction(&Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << Inst << "  to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst.use_empty()) {
          Inst.replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(&Inst, &TLI)) {
          salvageKnowledge(&Inst, &AC);
          removeMSSA(Inst);
          Inst.eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }

    // Make sure stores prior to a potential unwind are not removed, as the
    // unwinder may read the memory.
    if (Inst.mayThrow())
      LastStore = nullptr;

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(&Inst)) {
      if (auto *CI = dyn_cast<ConstrainedFPIntrinsic>(&Inst)) {
        assert(CI->getExceptionBehavior() != fp::ebStrict &&
               "Unexpected ebStrict from SimpleValue::canHandle()");
        assert((!CI->getRoundingMode() ||
                CI->getRoundingMode() != RoundingMode::Dynamic) &&
               "Unexpected dynamic rounding from SimpleValue::canHandle()");
      }
      // See if the instruction has an available value.  If so, use it.
      if (Value *V = AvailableValues.lookup(&Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << Inst << "  to: " << *V
                          << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        combineIRFlags(Inst, V);
        Inst.replaceAllUsesWith(V);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(&Inst, &Inst);
      continue;
    }

    ParseMemoryInst MemInst(&Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment.  If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(cast<LoadInst>(&Inst));
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (Value *Op = getMatchingValue(InVal, MemInst, CurrentGeneration)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << Inst
                          << "  to: " << *InVal.DefInst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (InVal.IsLoad)
          if (auto *I = dyn_cast<Instruction>(Op))
            combineMetadataForCSE(I, &Inst, false);
        if (!Inst.use_empty())
          Inst.replaceAllUsesWith(Op);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSELoad;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(MemInst.getPointerOperand(),
                            LoadValue(&Inst, CurrentGeneration,
                                      MemInst.getMatchingId(),
                                      MemInst.isAtomic(),
                                      MemInst.isLoad()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory or throw (and potentially
    // read from memory in the exception handler), forget LastStore.
    // Load/store intrinsics will indicate both a read and a write to memory.
    // The target may override this (e.g. so that a store intrinsic does not
    // read from memory, and thus will be treated the same as a regular store
    // for commoning purposes).
    if ((Inst.mayReadFromMemory() || Inst.mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.  Skip store MemInsts, as they
    // will be handled by the store-handling code below.
    if (CallValue::canHandle(&Inst) &&
        (!MemInst.isValid() || !MemInst.isStore())) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(&Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              &Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << Inst
                          << "  to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        combineIRFlags(Inst, InVal.first);
        if (!Inst.use_empty())
          Inst.replaceAllUsesWith(InVal.first);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Bump the generation for calls that write, so later reads cannot be
      // CSE'd across them.
      if (Inst.mayWriteToMemory())
        ++CurrentGeneration;

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(&Inst, std::make_pair(&Inst, CurrentGeneration));
      continue;
    }

    // Compare GEP instructions based on offset.
    if (GEPValue::canHandle(&Inst)) {
      auto *GEP = cast<GetElementPtrInst>(&Inst);
      APInt Offset(SQ.DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEPValue GEPVal(GEP, GEP->accumulateConstantOffset(SQ.DL, Offset)
                               ? Offset.trySExtValue()
                               : std::nullopt);
      if (Value *V = AvailableGEPs.lookup(GEPVal)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE GEP: " << Inst << "  to: " << *V
                          << '\n');
        combineIRFlags(Inst, V);
        Inst.replaceAllUsesWith(V);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSEGEP;
        continue;
      }

      // Otherwise, just remember that we have this GEP.
      AvailableGEPs.insert(GEPVal, &Inst);
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence.  As a
    // result, we don't need to consider it as writing to memory and don't
    // need to advance the generation.  We do need to prevent DSE across the
    // fence, but that's handled above.
    if (auto *FI = dyn_cast<FenceInst>(&Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst.mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // write back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write.  The primary benefit is in
    // allowing the available load table to remain valid and value forward
    // past where the store originally was.
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst ==
              getMatchingValue(InVal, MemInst, CurrentGeneration)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all.  Check to see if it is
    // something that could modify memory.  If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst.mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads.  Delete the earlier store.
        if (LastStore) {
          if (overridingStores(ParseMemoryInst(LastStore, TTI), MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << "  due to: " << Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              salvageKnowledge(&Inst, &AC);
              removeMSSA(*LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values.
        // Try to salvage *something* by remembering that the stored value is
        // a live version of the pointer.  It is safe to forward from volatile
        // stores to non-volatile loads, so we don't have to check for
        // volatility of the store.
        AvailableLoads.insert(MemInst.getPointerOperand(),
                              LoadValue(&Inst, CurrentGeneration,
                                        MemInst.getMatchingId(),
                                        MemInst.isAtomic(),
                                        MemInst.isLoad()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed.  We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform.  Another option
        // would be to merge the ordering with that of the post dominating
        // store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = &Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}
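
// Illustrative note (not from the original source): because every table is a
// ScopedHashTable and each dominator-tree node owns a NodeScope, values
// inserted while visiting a block are popped automatically when the walk
// leaves that block's subtree, so availability never leaks into blocks that
// the defining block does not dominate.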

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns.  For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      AvailableGEPs, CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(new StackNode(
          AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
          AvailableGEPs, NodeToProcess->childGeneration(), child,
          child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

void EarlyCSEPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<EarlyCSEPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (UseMemorySSA)
    OS << "memssa";
  OS << '>';
}
namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch
/// obvious cases so that instcombine and other passes are more effective. It
/// is expected that a later pass of GVN will catch the interesting/hard
/// cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)