LLVM: lib/Transforms/Utils/ScalarEvolutionExpander.cpp Source File (original) (raw)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
31
32#if LLVM_ENABLE_ABI_BREAKING_CHECKS
33#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
34#else
35#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
36#endif
37
38using namespace llvm;
39
42 cl::desc("When performing SCEV expansion only if it is cheap to do, this "
43 "controls the budget that is considered cheap (default = 4)"));
44
47
49 NUW = false;
50 NSW = false;
57 NUW = OBO->hasNoUnsignedWrap();
58 NSW = OBO->hasNoSignedWrap();
59 }
61 Exact = PEO->isExact();
65 NNeg = PNI->hasNonNeg();
67 NUW = TI->hasNoUnsignedWrap();
68 NSW = TI->hasNoSignedWrap();
69 }
71 GEPNW = GEP->getNoWrapFlags();
73 SameSign = ICmp->hasSameSign();
74}
75
78 I->setHasNoUnsignedWrap(NUW);
79 I->setHasNoSignedWrap(NSW);
80 }
86 PNI->setNonNeg(NNeg);
88 I->setHasNoUnsignedWrap(NUW);
89 I->setHasNoSignedWrap(NSW);
90 }
95}
96
97
98
99
100Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
103
104
105
106
107
108
109
110
111
113
114 Value *Ret = nullptr;
115
117
119 if (U->getType() != Ty)
120 continue;
123 continue;
124
125
126
127
128 if (IP->getParent() == CI->getParent() && &*BIP != CI &&
130 Ret = CI;
131 break;
132 }
133 }
134 }
135
136
137 if (!Ret) {
138 SCEVInsertPointGuard Guard(Builder, this);
139 Builder.SetInsertPoint(&*IP);
140 Ret = Builder.CreateCast(Op, V, Ty, V->getName());
141 }
142
143
144
145
148
149 return Ret;
150}
151
157 IP = II->getNormalDest()->begin();
158
160 ++IP;
161
163 ++IP;
165 IP = MustDominate->getParent()->getFirstInsertionPt();
166 } else {
167 assert(!IP->isEHPad() && "unexpected eh pad!");
168 }
169
170
171
172
174 ++IP;
175
176 return IP;
177}
178
183 while (!WorkList.empty()) {
185 if (DeletedValues.contains(V))
186 continue;
190 continue;
192 InsertedValues.erase(I);
193 InsertedPostIncValues.erase(I);
195 I->eraseFromParent();
196 }
197}
198
200SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
201
202
208 ++IP;
209 return IP;
210 }
211
212
215
216
217
219 "Expected the cast argument to be a global/constant");
220 return Builder.GetInsertBlock()
221 ->getParent()
222 ->getEntryBlock()
223 .getFirstInsertionPt();
224}
225
226
227
228
229Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
231 assert((Op == Instruction::BitCast ||
232 Op == Instruction::PtrToInt ||
233 Op == Instruction::IntToPtr) &&
234 "InsertNoopCastOfTo cannot perform non-noop casts!");
235 assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
236 "InsertNoopCastOfTo cannot change sizes!");
237
238
239
240
241
242
243 if (Op == Instruction::IntToPtr) {
245 if (DL.isNonIntegralPointerType(PtrTy))
247 }
248
249 if (Op == Instruction::BitCast) {
250 if (V->getType() == Ty)
251 return V;
255 }
256 }
257
258 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
259 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
261 if ((CI->getOpcode() == Instruction::PtrToInt ||
262 CI->getOpcode() == Instruction::IntToPtr) &&
263 SE.getTypeSizeInBits(CI->getType()) ==
267 if ((CE->getOpcode() == Instruction::PtrToInt ||
268 CE->getOpcode() == Instruction::IntToPtr) &&
269 SE.getTypeSizeInBits(CE->getType()) ==
270 SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
271 return CE->getOperand(0);
272 }
273
274
277
278
279 return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
280}
281
282
283
284
288
292 return Res;
293
294
295 unsigned ScanLimit = 6;
297
299 if (IP != BlockBegin) {
300 --IP;
301 for (; ScanLimit; --IP, --ScanLimit) {
303
306 return true;
307 if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
308 return true;
309 }
310
311
313 return true;
314 return false;
315 };
316 if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
317 IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
318 return &*IP;
319 if (IP == BlockBegin) break;
320 }
321 }
322
323
324 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
325 SCEVInsertPointGuard Guard(Builder, this);
326
327 if (IsSafeToHoist) {
328
329 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
330 if (->isLoopInvariant(LHS) ||
->isLoopInvariant(RHS)) break;
331 BasicBlock *Preheader = L->getLoopPreheader();
332 if (!Preheader) break;
333
334
335 Builder.SetInsertPoint(Preheader->getTerminator());
336 }
337 }
338
339
340
341
348
349 return BO;
350}
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
382 SE.DT.dominates(cast(V), &*Builder.GetInsertPoint()));
383
387
388
391 return Builder.CreatePtrAdd(CLHS, CRHS, "", NW);
392
393
394 unsigned ScanLimit = 6;
396
398 if (IP != BlockBegin) {
399 --IP;
400 for (; ScanLimit; --IP, --ScanLimit) {
402 if (GEP->getPointerOperand() == V &&
403 GEP->getSourceElementType() == Builder.getInt8Ty() &&
404 GEP->getOperand(1) == Idx) {
405 rememberFlags(GEP);
406 GEP->setNoWrapFlags(GEP->getNoWrapFlags() & NW);
407 return &*IP;
408 }
409 }
410 if (IP == BlockBegin) break;
411 }
412 }
413
414
415 SCEVInsertPointGuard Guard(Builder, this);
416
417
418 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
419 if (->isLoopInvariant(V) ||
->isLoopInvariant(Idx)) break;
420 BasicBlock *Preheader = L->getLoopPreheader();
421 if (!Preheader) break;
422
423
424 Builder.SetInsertPoint(Preheader->getTerminator());
425 }
426
427
428 return Builder.CreatePtrAdd(V, Idx, "scevgep", NW);
429}
430
431
432
433
436 if () return B;
437 if () return A;
438 if (A->contains(B)) return B;
439 if (B->contains(A)) return A;
440 if (DT.dominates(A->getHeader(), B->getHeader())) return B;
441 if (DT.dominates(B->getHeader(), A->getHeader())) return A;
442 return A;
443}
444
445
446
447const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
448
449 auto Pair = RelevantLoops.try_emplace(S);
450 if (!Pair.second)
451 return Pair.first->second;
452
456 return nullptr;
470 const Loop *L = nullptr;
472 L = AR->getLoop();
475 return RelevantLoops[S] = L;
476 }
480 return Pair.first->second = SE.LI.getLoopFor(I->getParent());
481
482 return nullptr;
483 }
485 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
486 }
488}
489
490namespace {
491
492
493class LoopCompare {
494 DominatorTree &DT;
495public:
496 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
497
498 bool operator()(std::pair<const Loop *, const SCEV *> LHS,
499 std::pair<const Loop *, const SCEV *> RHS) const {
500
504
505
506 if (LHS.first != RHS.first)
508
509
510
511
512 if (LHS.second->isNonConstantNegative()) {
513 if (.second->isNonConstantNegative())
514 return false;
515 } else if (RHS.second->isNonConstantNegative())
516 return true;
517
518
519 return false;
520 }
521};
522
523}
524
526
527 const SCEV *URemLHS = nullptr;
528 const SCEV *URemRHS = nullptr;
530 Value *LHS = expand(URemLHS);
531 Value *RHS = expand(URemRHS);
533 false);
534 }
535
536
537
538
539
542 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
543
544
545
547
548
549
550 Value *Sum = nullptr;
551 for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
552 const Loop *CurLoop = I->first;
553 const SCEV *Op = I->second;
554 if (!Sum) {
555
556 Sum = expand(Op);
557 ++I;
558 continue;
559 }
560
561 assert(->getType()->isPointerTy() && "Only first op can be pointer");
563
564
566 for (; I != E && I->first == CurLoop; ++I) {
567
568
569 const SCEV *X = I->second;
572 X = SE.getSCEV(U->getValue());
574 }
575 Sum = expandAddToGEP(SE.getAddExpr(NewOps), Sum, S->getNoWrapFlags());
576 } else if (Op->isNonConstantNegative()) {
577
578 Value *W = expand(SE.getNegativeSCEV(Op));
580 true);
581 ++I;
582 } else {
583
585
588 Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
589 true);
590 ++I;
591 }
592 }
593
594 return Sum;
595}
596
599
600
601
604 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
605
606
608
609
610
611 Value *Prod = nullptr;
612 auto I = OpsAndLoops.begin();
613
614
615
616
617 const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops]() {
619
620
622 const uint64_t MaxExponent = UINT64_MAX >> 1;
623
624
625
626
627 while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
629 ++E;
630 }
631 assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");
632
633
634
635 Value *P = expand(I->second);
639 for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
641 true);
643 Result = Result ? InsertBinop(Instruction::Mul, Result, P,
645 true)
646 : P;
647 }
648
650 assert(Result && "Nothing was expanded?");
652 };
653
654 while (I != OpsAndLoops.end()) {
655 if (!Prod) {
656
657 Prod = ExpandOpBinPowN();
658 } else if (I->second->isAllOnesValue()) {
659
662 ++I;
663 } else {
664
665 Value *W = ExpandOpBinPowN();
666
668 const APInt *RHS;
670
671 assert(!Ty->isVectorTy() && "vector types are not SCEVable");
673
674 if (RHS->logBase2() == RHS->getBitWidth() - 1)
676 Prod = InsertBinop(Instruction::Shl, Prod,
677 ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
678 true);
679 } else {
680 Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
681 true);
682 }
683 }
684 }
685
686 return Prod;
687}
688
692 const APInt &RHS = SC->getAPInt();
693 if (RHS.isPowerOf2())
694 return InsertBinop(Instruction::LShr, LHS,
695 ConstantInt::get(SC->getType(), RHS.logBase2()),
697 }
698
699 const SCEV *RHSExpr = S->getRHS();
700 Value *RHS = expand(RHSExpr);
701 if (SafeUDivMode) {
702 bool GuaranteedNotPoison =
703 ScalarEvolution::isGuaranteedNotToBePoison(RHSExpr);
704 if (!GuaranteedNotPoison)
705 RHS = Builder.CreateFreeze(RHS);
706
707
708
709
710 if (!SE.isKnownNonZero(RHSExpr) || !GuaranteedNotPoison)
711 RHS = Builder.CreateIntrinsic(RHS->getType(), Intrinsic::umax,
712 {RHS, ConstantInt::get(RHS->getType(), 1)});
713 }
715 SE.isKnownNonZero(S->getRHS()));
716}
717
718
719
720bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
721 const Loop *L) {
724 return false;
725
726
727
728 if (L == IVIncInsertLoop) {
731 if (!SE.DT.dominates(OInst, IVIncInsertPos))
732 return false;
733 }
734
736 if (!IncV)
737 return false;
738
740 return false;
741
742 if (IncV == PN)
743 return true;
744
745 return isNormalAddRecExprPHI(PN, IncV, L);
746}
747
748
749
750
751
752
753
754
755
756
759 bool allowScale) {
760 if (IncV == InsertPos)
761 return nullptr;
762
764 default:
765 return nullptr;
766
767 case Instruction::Add:
768 case Instruction::Sub: {
770 if (!OInst || SE.DT.dominates(OInst, InsertPos))
772 return nullptr;
773 }
774 case Instruction::BitCast:
776 case Instruction::GetElementPtr:
779 continue;
781 if (!SE.DT.dominates(OInst, InsertPos))
782 return nullptr;
783 }
784 if (allowScale) {
785
786 continue;
787 }
788
789 if ((IncV)->getSourceElementType()->isIntegerTy(8))
790 return nullptr;
791 break;
792 }
794 }
795}
796
797
798
799
800
801
802
803
804void SCEVExpander::fixupInsertPoints(Instruction *I) {
807 if (Builder.GetInsertPoint() == It)
808 Builder.SetInsertPoint(&*NewInsertPt);
809 for (auto *InsertPtGuard : InsertPointGuards)
810 if (InsertPtGuard->GetInsertPoint() == It)
811 InsertPtGuard->SetInsertPoint(NewInsertPt);
812}
813
814
815
816
818 bool RecomputePoisonFlags) {
819 auto FixupPoisonFlags = [this](Instruction *I) {
820
821
822 rememberFlags(I);
823 I->dropPoisonGeneratingFlags();
825 if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
831 }
832 };
833
834 if (SE.DT.dominates(IncV, InsertPos)) {
835 if (RecomputePoisonFlags)
836 FixupPoisonFlags(IncV);
837 return true;
838 }
839
840
841
844 return false;
845
846 if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
847 return false;
848
849
851 for(;;) {
853 if (!Oper)
854 return false;
855
857 IncV = Oper;
858 if (SE.DT.dominates(IncV, InsertPos))
859 break;
860 }
862 fixupInsertPoints(I);
864 if (RecomputePoisonFlags)
865 FixupPoisonFlags(I);
866 }
867 return true;
868}
869
878
879
880
881
882
883
884bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
885 const Loop *L) {
887 (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
888 false));) {
889 if (IVOper == PN)
890 return true;
891 }
892 return false;
893}
894
895
896
897
899 bool useSubtract) {
901
903
904 IncV = Builder.CreatePtrAdd(PN, StepV, "scevgep");
905 } else {
906 IncV = useSubtract ?
907 Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
908 Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
909 }
910 return IncV;
911}
912
913
914
918 bool &InvertStep) {
919
920 Type *PhiTy = Phi->getType();
921 Type *RequestedTy = Requested->getType();
923 return false;
924
926 return false;
927
928
930 if (!Phi)
931 return false;
932
933
934 if (Phi == Requested) {
935 InvertStep = false;
936 return true;
937 }
938
939
941 InvertStep = true;
942 return true;
943 }
944
945 return false;
946}
947
950 return false;
951
957 const SCEV *ExtendAfterOp =
959 return ExtendAfterOp == OpAfterExtend;
960}
961
964 return false;
965
971 const SCEV *ExtendAfterOp =
973 return ExtendAfterOp == OpAfterExtend;
974}
975
976
977
978
980SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
981 const Loop *L, Type *&TruncTy,
982 bool &InvertStep) {
983 assert((!IVIncInsertLoop || IVIncInsertPos) &&
984 "Uninitialized insert position");
985
986
987 BasicBlock *LatchBlock = L->getLoopLatch();
988 if (LatchBlock) {
989 PHINode *AddRecPhiMatch = nullptr;
991 TruncTy = nullptr;
992 InvertStep = false;
993
994
995
996 bool TryNonMatchingSCEV =
997 IVIncInsertLoop &&
998 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
999
1000 for (PHINode &PN : L->getHeader()->phis()) {
1001 if (!SE.isSCEVable(PN.getType()))
1002 continue;
1003
1004
1005
1008 DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
1009 continue;
1010 }
1011
1013 if (!PhiSCEV)
1014 continue;
1015
1016 bool IsMatchingSCEV = PhiSCEV == Normalized;
1017
1018
1019
1020 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1021 continue;
1022
1023
1026 if (!TempIncV)
1027 continue;
1028
1029
1030 if (LSRMode) {
1031 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1032 continue;
1033 } else {
1034 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1035 continue;
1036 }
1037
1038
1039 if (IsMatchingSCEV) {
1040 IncV = TempIncV;
1041 TruncTy = nullptr;
1042 InvertStep = false;
1043 AddRecPhiMatch = &PN;
1044 break;
1045 }
1046
1047
1048
1049 if ((!TruncTy || InvertStep) &&
1051
1052
1053 AddRecPhiMatch = &PN;
1054 IncV = TempIncV;
1055 TruncTy = Normalized->getType();
1056 }
1057 }
1058
1059 if (AddRecPhiMatch) {
1060
1061
1062 InsertedValues.insert(AddRecPhiMatch);
1063
1064 rememberInstruction(IncV);
1065
1066 ReusedValues.insert(AddRecPhiMatch);
1067 ReusedValues.insert(IncV);
1068 return AddRecPhiMatch;
1069 }
1070 }
1071
1072
1073 SCEVInsertPointGuard Guard(Builder, this);
1074
1075
1076
1077
1078
1079
1080
1081
1083 PostIncLoops.clear();
1084
1085
1086 assert(L->getLoopPreheader() &&
1087 "Can't expand add recurrences without a loop preheader!");
1089 expand(Normalized->getStart(), L->getLoopPreheader()->getTerminator());
1090
1091
1092
1095 L->getHeader()));
1096
1097
1098
1101
1102
1103
1105 if (useSubtract)
1106 Step = SE.getNegativeSCEV(Step);
1107
1108 Value *StepV = expand(Step, L->getHeader()->getFirstInsertionPt());
1109
1110
1111
1112
1113 bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1114 bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1115
1116
1118 Builder.SetInsertPoint(Header, Header->begin());
1119 PHINode *PN =
1120 Builder.CreatePHI(ExpandTy, pred_size(Header), Twine(IVName) + ".iv");
1121
1122
1123 for (BasicBlock *Pred : predecessors(Header)) {
1124
1125 if (->contains(Pred)) {
1127 continue;
1128 }
1129
1130
1131
1132
1133 Instruction *InsertPos = L == IVIncInsertLoop ?
1134 IVIncInsertPos : Pred->getTerminator();
1135 Builder.SetInsertPoint(InsertPos);
1136 Value *IncV = expandIVInc(PN, StepV, L, useSubtract);
1137
1139 if (IncrementIsNUW)
1141 if (IncrementIsNSW)
1143 }
1145 }
1146
1147
1148
1149 PostIncLoops = SavedPostIncLoops;
1150
1151
1152
1153 InsertedValues.insert(PN);
1154 InsertedIVs.push_back(PN);
1155 return PN;
1156}
1157
1158Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1159 const Loop *L = S->getLoop();
1160
1161
1162
1163 const SCEVAddRecExpr *Normalized = S;
1164 if (PostIncLoops.count(L)) {
1169 }
1170
1171 [[maybe_unused]] const SCEV *Start = Normalized->getStart();
1173 assert(SE.properlyDominates(Start, L->getHeader()) &&
1174 "Start does not properly dominate loop header");
1175 assert(SE.dominates(Step, L->getHeader()) && "Step not dominate loop header");
1176
1177
1178
1179 Type *TruncTy = nullptr;
1180 bool InvertStep = false;
1181 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, TruncTy, InvertStep);
1182
1183
1185 if (!PostIncLoops.count(L))
1187 else {
1188
1189 BasicBlock *LatchBlock = L->getLoopLatch();
1190 assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1192
1193
1194
1195
1199 I->setHasNoUnsignedWrap(false);
1201 I->setHasNoSignedWrap(false);
1202 }
1203
1204
1205
1206
1209 &*Builder.GetInsertPoint())) {
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219 bool useSubtract =
1221 if (useSubtract)
1222 Step = SE.getNegativeSCEV(Step);
1224 {
1225
1226 SCEVInsertPointGuard Guard(Builder, this);
1227 StepV = expand(Step, L->getHeader()->getFirstInsertionPt());
1228 }
1229 Result = expandIVInc(PN, StepV, L, useSubtract);
1230 }
1231 }
1232
1233
1234
1235 if (TruncTy) {
1236
1237 if (TruncTy != Result->getType())
1238 Result = Builder.CreateTrunc(Result, TruncTy);
1239
1240
1241 if (InvertStep)
1242 Result = Builder.CreateSub(expand(Normalized->getStart()), Result);
1243 }
1244
1246}
1247
1250 const Loop *L = S->getLoop();
1253 !SE.DT.dominates(EB, Builder.GetInsertBlock()))
1254 return nullptr;
1255
1256 for (auto &PN : EB->phis()) {
1257 if (!SE.isSCEVable(PN.getType()))
1258 continue;
1259 auto *ExitSCEV = SE.getSCEV(&PN);
1261 continue;
1264 ExitSCEV = SE.getPtrToIntExpr(ExitSCEV, STy);
1266 continue;
1268 continue;
1269 }
1270
1271
1272
1273 const SCEV *Diff = SE.getMinusSCEV(S, ExitSCEV);
1274 const SCEV *Op = Diff;
1279 continue;
1280
1282 "difference must be of integer type");
1283 Value *DiffV = expand(Diff);
1284 Value *BaseV = fixupLCSSAFormFor(&PN);
1287 return Builder.CreatePtrAdd(BaseV, DiffV);
1288 BaseV = Builder.CreatePtrToInt(BaseV, DiffV->getType());
1289 }
1290 return Builder.CreateAdd(BaseV, DiffV);
1291 }
1292
1293 return nullptr;
1294}
1295
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1308 return expandAddRecExprLiterally(S);
1309
1310 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1311 const Loop *L = S->getLoop();
1312
1313
1314 PHINode *CanonicalIV = nullptr;
1315 if (PHINode *PN = L->getCanonicalInductionVariable())
1316 if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1317 CanonicalIV = PN;
1318
1319
1320
1321 if (CanonicalIV &&
1322 SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
1325 for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1326 NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->getType());
1327 Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1331 V = expand(SE.getTruncateExpr(SE.getUnknown(V), Ty), NewInsertPt);
1332 return V;
1333 }
1334
1335
1336
1337 if (Value *V = tryToReuseLCSSAPhi(S))
1338 return V;
1339
1340
1343 Value *StartV = expand(SE.getPointerBase(S));
1344 return expandAddToGEP(SE.removePointerBase(S), StartV,
1346 }
1347
1349 NewOps[0] = SE.getConstant(Ty, 0);
1350 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1352
1353
1354
1355
1356
1357 const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1358 const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1359 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1360 }
1361
1362
1363 if (!CanonicalIV) {
1364
1365
1368 CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar");
1370 rememberInstruction(CanonicalIV);
1371
1372 SmallPtrSet<BasicBlock *, 4> PredSeen;
1373 Constant *One = ConstantInt::get(Ty, 1);
1374 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1376 if (!PredSeen.insert(HP).second) {
1377
1378
1380 continue;
1381 }
1382
1383 if (L->contains(HP)) {
1384
1385
1386 Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1387 "indvar.next",
1390 rememberInstruction(Add);
1392 } else {
1394 }
1395 }
1396 }
1397
1398
1400 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1401 "IVs with types different from the canonical IV should "
1402 "already have been handled!");
1403 return CanonicalIV;
1404 }
1405
1406
1407
1408
1409 if (S->isAffine())
1410 return
1411 expand(SE.getTruncateOrNoop(
1412 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1413 SE.getNoopOrAnyExtend(S->getOperand(1),
1414 CanonicalIV->getType())),
1415 Ty));
1416
1417
1418
1419
1420
1421 const SCEV *IH = SE.getUnknown(CanonicalIV);
1422
1423
1424 const SCEV *NewS = S;
1425 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1427 NewS = Ext;
1428
1430
1431
1432 const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1433 return expand(T);
1434}
1435
1438 return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1439 GetOptimalInsertionPointForCastOf(V));
1440}
1441
1444 return Builder.CreateTrunc(V, S->getType());
1445}
1446
1449 return Builder.CreateZExt(V, S->getType(), "",
1450 SE.isKnownNonNegative(S->getOperand()));
1451}
1452
1455 return Builder.CreateSExt(V, S->getType());
1456}
1457
1460 bool IsSequential) {
1461 bool PrevSafeMode = SafeUDivMode;
1462 SafeUDivMode |= IsSequential;
1465 if (IsSequential)
1466 LHS = Builder.CreateFreeze(LHS);
1467 for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1468 SafeUDivMode = (IsSequential && i != 0) || PrevSafeMode;
1470 if (IsSequential && i != 0)
1471 RHS = Builder.CreateFreeze(RHS);
1474 Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {LHS, RHS},
1475 nullptr, Name);
1476 else {
1479 Sel = Builder.CreateSelect(ICmp, LHS, RHS, Name);
1480 }
1481 LHS = Sel;
1482 }
1483 SafeUDivMode = PrevSafeMode;
1484 return LHS;
1485}
1486
1488 return expandMinMaxExpr(S, Intrinsic::smax, "smax");
1489}
1490
1492 return expandMinMaxExpr(S, Intrinsic::umax, "umax");
1493}
1494
1496 return expandMinMaxExpr(S, Intrinsic::smin, "smin");
1497}
1498
1500 return expandMinMaxExpr(S, Intrinsic::umin, "umin");
1501}
1502
1504 return expandMinMaxExpr(S, Intrinsic::umin, "umin", true);
1505}
1506
1508 return Builder.CreateVScale(S->getType());
1509}
1510
1517
1519
1520 Value *V = expand(SH);
1521
1522 if (Ty && Ty != V->getType()) {
1523 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1524 "non-trivial casts should be done with the SCEVs directly!");
1525 V = InsertNoopCastOfTo(V, Ty);
1526 }
1527 return V;
1528}
1529
1530Value *SCEVExpander::FindValueInExprValueMap(
1533
1534
1536 return nullptr;
1537
1538
1540 return nullptr;
1541
1542 for (Value *V : SE.getSCEVValues(S)) {
1544 if (!EntInst)
1545 continue;
1546
1547
1548
1549
1551 if (S->getType() != V->getType() || !SE.DT.dominates(EntInst, InsertPt) ||
1554 continue;
1555
1556
1558 return V;
1559 DropPoisonGeneratingInsts.clear();
1560 }
1561 return nullptr;
1562}
1563
1564
1565
1566
1567
1568
1569
1570Value *SCEVExpander::expand(const SCEV *S) {
1571
1572
1574
1575
1576
1577 auto SafeToHoist = [](const SCEV *S) {
1581
1582 return SC->getValue()->isZero();
1583
1584
1585
1586
1587 return true;
1588 }
1589 return false;
1590 });
1591 };
1592 if (SafeToHoist(S)) {
1593 for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1594 L = L->getParentLoop()) {
1595 if (SE.isLoopInvariant(S, L)) {
1596 if (!L) break;
1597 if (BasicBlock *Preheader = L->getLoopPreheader()) {
1599 } else {
1600
1601
1602
1603 InsertPt = L->getHeader()->getFirstInsertionPt();
1604 }
1605 } else {
1606
1607
1608
1609 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1610 InsertPt = L->getHeader()->getFirstInsertionPt();
1611
1612 while (InsertPt != Builder.GetInsertPoint() &&
1614 InsertPt = std::next(InsertPt);
1615 }
1616 break;
1617 }
1618 }
1619 }
1620
1621
1622 auto I = InsertedExpressions.find(std::make_pair(S, &*InsertPt));
1623 if (I != InsertedExpressions.end())
1624 return I->second;
1625
1626 SCEVInsertPointGuard Guard(Builder, this);
1627 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1628
1629
1631 Value *V = FindValueInExprValueMap(S, &*InsertPt, DropPoisonGeneratingInsts);
1632 if (!V) {
1634 V = fixupLCSSAFormFor(V);
1635 } else {
1636 for (Instruction *I : DropPoisonGeneratingInsts) {
1637 rememberFlags(I);
1638 I->dropPoisonGeneratingAnnotations();
1639
1640
1642 if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
1648 }
1650 auto *Src = NNI->getOperand(0);
1653 DL).value_or(false))
1654 NNI->setNonNeg(true);
1655 }
1656 }
1657 }
1658
1659
1660
1661
1662
1663
1664 InsertedExpressions[std::make_pair(S, &*InsertPt)] = V;
1665 return V;
1666}
1667
1668void SCEVExpander::rememberInstruction(Value *I) {
1669 auto DoInsert = [this](Value *V) {
1670 if (!PostIncLoops.empty())
1671 InsertedPostIncValues.insert(V);
1672 else
1673 InsertedValues.insert(V);
1674 };
1675 DoInsert(I);
1676}
1677
/// Snapshot I's current poison-generating flags (presumably so they can be
/// restored later if the expansion is rolled back -- confirm against the
/// PoisonFlags users elsewhere in this file).
void SCEVExpander::rememberFlags(Instruction *I) {
  // try_emplace keeps an existing entry, so only the flags from the first
  // call for a given instruction are recorded.
  OrigFlags.try_emplace(I, PoisonFlags(I));
}
1682
1683void SCEVExpander::replaceCongruentIVInc(
1686 BasicBlock *LatchBlock = L->getLoopLatch();
1687 if (!LatchBlock)
1688 return;
1689
1694 if (!OrigInc || !IsomorphicInc)
1695 return;
1696
1697
1698
1699
1700 if (OrigPhi->getType() == Phi->getType()) {
1701 bool Chained = ChainedPhis.contains(Phi);
1702 if (!(Chained || isExpandedAddRecExprPHI(OrigPhi, OrigInc, L)) &&
1703 (Chained || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1705 std::swap(OrigInc, IsomorphicInc);
1706 }
1707 }
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718 const SCEV *TruncExpr =
1719 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1720 if (OrigInc == IsomorphicInc || TruncExpr != SE.getSCEV(IsomorphicInc) ||
1721 !SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc))
1722 return;
1723
1724 bool BothHaveNUW = false;
1725 bool BothHaveNSW = false;
1728 if (OBOIncV && OBOIsomorphic) {
1729 BothHaveNUW =
1730 OBOIncV->hasNoUnsignedWrap() && OBOIsomorphic->hasNoUnsignedWrap();
1731 BothHaveNSW =
1732 OBOIncV->hasNoSignedWrap() && OBOIsomorphic->hasNoSignedWrap();
1733 }
1734
1735 if ((OrigInc, IsomorphicInc,
1736 true))
1737 return;
1738
1739
1740
1741
1742
1745 "Should only replace an increment with a wider one.");
1746 if (BothHaveNUW || BothHaveNSW) {
1748 OrigInc->setHasNoSignedWrap(OBOIncV->hasNoSignedWrap() || BothHaveNSW);
1749 }
1750
1752 dbgs() << "INDVARS: Eliminated congruent iv.inc: "
1753 << *IsomorphicInc << '\n');
1754 Value *NewInc = OrigInc;
1755 if (OrigInc->getType() != IsomorphicInc->getType()) {
1758 IP = PN->getParent()->getFirstInsertionPt();
1759 else
1761
1762 IRBuilder<> Builder(IP->getParent(), IP);
1763 Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1764 NewInc =
1765 Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
1766 }
1769}
1770
1771
1772
1773
1774
1775
1776
1777unsigned
1781
1784
1785 if (TTI)
1786
1787
1789
1790 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1791 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1792 return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() <
1793 LHS->getType()->getPrimitiveSizeInBits().getFixedValue();
1794 });
1795
1796 unsigned NumElim = 0;
1798
1799
1800 for (PHINode *Phi : Phis) {
1801 auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1803 return V;
1804 if (!SE.isSCEVable(PN->getType()))
1805 return nullptr;
1807 if (!Const)
1808 return nullptr;
1809 return Const->getValue();
1810 };
1811
1812
1813
1814 if (Value *V = SimplifyPHINode(Phi)) {
1815 if (V->getType() != Phi->getType())
1816 continue;
1817 SE.forgetValue(Phi);
1818 Phi->replaceAllUsesWith(V);
1820 ++NumElim;
1822 dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
1823 << '\n');
1824 continue;
1825 }
1826
1827 if (!SE.isSCEVable(Phi->getType()))
1828 continue;
1829
1830 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1831 if (!OrigPhiRef) {
1832 OrigPhiRef = Phi;
1833 if (Phi->getType()->isIntegerTy() && TTI &&
1834 TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1835
1836
1837
1838 const SCEV *PhiExpr = SE.getSCEV(Phi);
1840
1841
1842 const SCEV *TruncExpr =
1843 SE.getTruncateExpr(PhiExpr, Phis.back()->getType());
1844 ExprToIVMap[TruncExpr] = Phi;
1845 }
1846 }
1847 continue;
1848 }
1849
1850
1851
1852 if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1853 continue;
1854
1855 replaceCongruentIVInc(Phi, OrigPhiRef, L, DT, DeadInsts);
1857 dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
1858 << '\n');
1860 DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
1861 ++NumElim;
1862 Value *NewIV = OrigPhiRef;
1863 if (OrigPhiRef->getType() != Phi->getType()) {
1865 L->getHeader()->getFirstInsertionPt());
1866 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1867 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1868 }
1869 Phi->replaceAllUsesWith(NewIV);
1871 }
1872 return NumElim;
1873}
1874
1879
1881 L->getExitingBlocks(ExitingBlocks);
1882
1883
1884 for (BasicBlock *BB : ExitingBlocks) {
1887
1888 if ((BB->getTerminator(),
1891 continue;
1892
1893 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1894 return true;
1895
1896 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1897 return true;
1898 }
1899
1900
1901
1902
1903
1905 return FindValueInExprValueMap(S, At, DropPoisonGeneratingInsts) != nullptr;
1906}
1907
1912
1915
1916 struct OperationIndices {
1917 OperationIndices(unsigned Opc, size_t min, size_t max) :
1918 Opcode(Opc), MinIdx(min), MaxIdx(max) { }
1919 unsigned Opcode;
1920 size_t MinIdx;
1921 size_t MaxIdx;
1922 };
1923
1924
1925
1926
1928
1929 auto CastCost = [&](unsigned Opcode) -> InstructionCost {
1931 return TTI.getCastInstrCost(Opcode, S->getType(),
1932 S->getOperand(0)->getType(),
1934 };
1935
1936 auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
1937 unsigned MinIdx = 0,
1939 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1940 return NumRequired *
1941 TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
1942 };
1943
1944 auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
1946 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1947 Type *OpType = S->getType();
1948 return NumRequired * TTI.getCmpSelInstrCost(
1951 };
1952
1953 switch (S->getSCEVType()) {
1955 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
1959 return 0;
1961 Cost = CastCost(Instruction::PtrToInt);
1962 break;
1964 Cost = CastCost(Instruction::Trunc);
1965 break;
1967 Cost = CastCost(Instruction::ZExt);
1968 break;
1970 Cost = CastCost(Instruction::SExt);
1971 break;
1973 unsigned Opcode = Instruction::UDiv;
1975 if (SC->getAPInt().isPowerOf2())
1976 Opcode = Instruction::LShr;
1977 Cost = ArithCost(Opcode, 1);
1978 break;
1979 }
1981 Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
1982 break;
1984
1985
1986
1987 Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
1988 break;
1994
1995
1996 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
1997 Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
1998 switch (S->getSCEVType()) {
2000
2001
2002 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
2003 Cost += ArithCost(Instruction::Or,
2004 S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
2005 Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
2006 break;
2007 }
2008 default:
2010 "Unhandled SCEV expression type?");
2011 break;
2012 }
2013 break;
2014 }
2016
2017 unsigned NumRecurrences = S->getNumOperands() - 1;
2018 Cost += TTI.getCFInstrCost(Instruction::PHI, CostKind) * NumRecurrences;
2019 Cost +=
2020 TTI.getArithmeticInstrCost(Instruction::Add, S->getType(), CostKind) *
2021 NumRecurrences;
2022
2023 Worklist.emplace_back(Instruction::PHI, 0, S->getOperand(0));
2024
2025 for (const SCEV *Op : S->operands().drop_front())
2027 break;
2028 }
2029 }
2030
2031 for (auto &CostOp : Operations) {
2032 for (auto SCEVOp : enumerate(S->operands())) {
2033
2034 size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2035 size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2037 }
2038 }
2039 return Cost;
2040}
2041
2042bool SCEVExpander::isHighCostExpansionHelper(
2047 if (Cost > Budget)
2048 return true;
2049
2050 const SCEV *S = WorkItem.S;
2051
2053 return false;
2054
2055
2056
2058 return false;
2059
2061 L->getHeader()->getParent()->hasMinSize()
2064
2067 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2070
2071 return false;
2073
2075 return false;
2080 return Cost > Budget;
2081 }
2088 return false;
2089 }
2091
2092
2093
2094
2095
2096
2097
2098
2100 SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
2101 return false;
2102
2105 return false;
2106 }
2115 "Nary expr should have more than 1 operand.");
2116
2117
2120 return Cost > Budget;
2121 }
2124 "Polynomial should be at least linear");
2127 return Cost > Budget;
2128 }
2129 }
2131}
2132
2136 switch (Pred->getKind()) {
2144 }
2145 }
2147}
2148
2151 Value *Expr0 = expand(Pred->getLHS(), IP);
2152 Value *Expr1 = expand(Pred->getRHS(), IP);
2153
2154 Builder.SetInsertPoint(IP);
2156 auto *I = Builder.CreateICmp(InvPred, Expr0, Expr1, "ident.check");
2157 return I;
2158}
2159
2162 assert(AR->isAffine() && "Cannot generate RT check for "
2163 "non-affine expression");
2164
2165
2167 const SCEV *ExitCount =
2168 SE.getPredicatedSymbolicMaxBackedgeTakenCount(AR->getLoop(), Pred);
2169
2171
2174
2176 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2177 unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2178
2179
2180
2181
2182
2183
2184 Builder.SetInsertPoint(Loc);
2185 Value *TripCountVal = expand(ExitCount, Loc);
2186
2189
2190 Value *StepValue = expand(Step, Loc);
2191 Value *NegStepValue = expand(SE.getNegativeSCEV(Step), Loc);
2192 Value *StartValue = expand(Start, Loc);
2193
2196
2197 Builder.SetInsertPoint(Loc);
2198
2200 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210 auto ComputeEndCheck = [&]() -> Value * {
2211
2212 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2213
2214 CallInst *Mul = Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, Ty,
2215 {AbsStep, TruncTripCount},
2216 nullptr, "mul");
2217 Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2218 Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2219
2221 bool NeedPosCheck = !SE.isKnownNegative(Step);
2222 bool NeedNegCheck = !SE.isKnownPositive(Step);
2223
2225 Value *NegMulV = Builder.CreateNeg(MulV);
2226 if (NeedPosCheck)
2227 Add = Builder.CreatePtrAdd(StartValue, MulV);
2228 if (NeedNegCheck)
2229 Sub = Builder.CreatePtrAdd(StartValue, NegMulV);
2230 } else {
2231 if (NeedPosCheck)
2232 Add = Builder.CreateAdd(StartValue, MulV);
2233 if (NeedNegCheck)
2234 Sub = Builder.CreateSub(StartValue, MulV);
2235 }
2236
2237 Value *EndCompareLT = nullptr;
2238 Value *EndCompareGT = nullptr;
2239 Value *EndCheck = nullptr;
2240 if (NeedPosCheck)
2241 EndCheck = EndCompareLT = Builder.CreateICmp(
2243 if (NeedNegCheck)
2244 EndCheck = EndCompareGT = Builder.CreateICmp(
2246 if (NeedPosCheck && NeedNegCheck) {
2247
2248 EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2249 }
2250 return Builder.CreateOr(EndCheck, OfMul);
2251 };
2252 Value *EndCheck = ComputeEndCheck();
2253
2254
2255
2256
2257 if (SrcBits > DstBits) {
2259 auto *BackedgeCheck =
2261 ConstantInt::get(Loc->getContext(), MaxVal));
2262 BackedgeCheck = Builder.CreateAnd(
2263 BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2264
2265 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2266 }
2267
2268 return EndCheck;
2269}
2270
2274 Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2275
2276
2279
2280
2283
2284 if (NUSWCheck && NSSWCheck)
2285 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2286
2287 if (NUSWCheck)
2288 return NUSWCheck;
2289
2290 if (NSSWCheck)
2291 return NSSWCheck;
2292
2294}
2295
2298
2300 for (const auto *Pred : Union->getPredicates()) {
2302 Builder.SetInsertPoint(IP);
2303 }
2304
2305 if (Checks.empty())
2307 return Builder.CreateOr(Checks);
2308}
2309
2310Value *SCEVExpander::fixupLCSSAFormFor(Value *V) {
2312 if (!PreserveLCSSA || !DefI)
2313 return V;
2314
2316 Loop *DefLoop = SE.LI.getLoopFor(DefI->getParent());
2317 Loop *UseLoop = SE.LI.getLoopFor(InsertPt->getParent());
2318 if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2319 return V;
2320
2321
2322
2323
2324
2325
2326
2327
2329 if (DefI->getType()->isIntegerTy())
2331 else
2335 auto RemoveUserOnExit =
2337
2343 &InsertedPHIs);
2344 for (PHINode *PN : InsertedPHIs)
2345 rememberInstruction(PN);
2346 for (PHINode *PN : PHIsToRemove) {
2348 continue;
2349 InsertedValues.erase(PN);
2350 InsertedPostIncValues.erase(PN);
2352 }
2353
2354 return User->getOperand(0);
2355}
2356
2357namespace {
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376struct SCEVFindUnsafe {
2377 ScalarEvolution &SE;
2378 bool CanonicalMode;
2379 bool IsUnsafe = false;
2380
2381 SCEVFindUnsafe(ScalarEvolution &SE, bool CanonicalMode)
2382 : SE(SE), CanonicalMode(CanonicalMode) {}
2383
2384 bool follow(const SCEV *S) {
2387 IsUnsafe = true;
2388 return false;
2389 }
2390 }
2392
2393
2394 if (!AR->getLoop()->getLoopPreheader() &&
2395 (!CanonicalMode || !AR->isAffine())) {
2396 IsUnsafe = true;
2397 return false;
2398 }
2399 }
2400 return true;
2401 }
2402 bool isDone() const { return IsUnsafe; }
2403};
2404}
2405
2407 SCEVFindUnsafe Search(SE, CanonicalMode);
2409 return !Search.IsUnsafe;
2410}
2411
2415 return false;
2416
2417
2418
2419
2420
2421
2422 if (SE.properlyDominates(S, InsertionPoint->getParent()))
2423 return true;
2426 return true;
2429 return true;
2430 }
2431 return false;
2432}
2433
2435
2436 if (ResultUsed)
2437 return;
2438
2439
2440 for (auto [I, Flags] : Expander.OrigFlags)
2441 Flags.apply(I);
2442
2443 auto InsertedInstructions = Expander.getAllInsertedInstructions();
2444#ifndef NDEBUG
2446 InsertedInstructions);
2447 (void)InsertedSet;
2448#endif
2449
2450 Expander.clear();
2451
2452
2454#ifndef NDEBUG
2456 [&InsertedSet](Value *U) {
2457 return InsertedSet.contains(cast(U));
2458 }) &&
2459 "removed instruction should only be used by instructions inserted "
2460 "during expansion");
2461#endif
2462 assert(->getType()->isVoidTy() &&
2463 "inserted instruction should have non-void types");
2465 I->eraseFromParent();
2466 }
2467}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
Definition ScalarEvolutionExpander.cpp:962
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that's most relevant for SCEV expansion.
Definition ScalarEvolutionExpander.cpp:434
static InstructionCost costAndCollectOperands(const SCEVOperand &WorkItem, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, SmallVectorImpl< SCEVOperand > &Worklist)
Definition ScalarEvolutionExpander.cpp:1908
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
Definition ScalarEvolutionExpander.cpp:948
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncat...
Definition ScalarEvolutionExpander.cpp:915
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
Definition ScalarEvolutionExpander.cpp:35
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This pass exposes codegen information to IR-level passes.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This class represents an incoming formal argument to a Function.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static GEPNoWrapFlags noUnsignedWrap()
static GEPNoWrapFlags none()
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
const SCEV * getOperand() const
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
LLVM_ABI void cleanup()
Definition ScalarEvolutionExpander.cpp:2434
LLVM_ABI Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
Definition ScalarEvolutionExpander.cpp:2160
LLVM_ABI bool hasRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Determine whether there is an existing expansion of S that can be reused.
Definition ScalarEvolutionExpander.cpp:1875
SmallVector< Instruction *, 32 > getAllInsertedInstructions() const
Return a vector containing all instructions inserted during expansion.
LLVM_ABI bool isSafeToExpand(const SCEV *S) const
Return true if the given expression is safe to expand in the sense that all materialized values are s...
Definition ScalarEvolutionExpander.cpp:2406
LLVM_ABI bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint) const
Return true if the given expression is safe to expand in the sense that all materialized values are d...
Definition ScalarEvolutionExpander.cpp:2412
LLVM_ABI unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
replace congruent phis with their most canonical representative.
Definition ScalarEvolutionExpander.cpp:1778
LLVM_ABI Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
Definition ScalarEvolutionExpander.cpp:2296
LLVM_ABI bool hoistIVInc(Instruction *IncV, Instruction *InsertPos, bool RecomputePoisonFlags=false)
Utility for hoisting IncV (with all subexpressions requried for its computation) before InsertPos.
Definition ScalarEvolutionExpander.cpp:817
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
Definition ScalarEvolutionExpander.cpp:2133
static LLVM_ABI bool canReuseFlagsFromOriginalIVInc(PHINode *OrigPhi, PHINode *WidePhi, Instruction *OrigInc, Instruction *WideInc)
Return true if both increments directly increment the corresponding IV PHI nodes and have the same op...
Definition ScalarEvolutionExpander.cpp:870
LLVM_ABI Value * expandComparePredicate(const SCEVComparePredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
Definition ScalarEvolutionExpander.cpp:2149
LLVM_ABI Value * expandCodeFor(const SCEV *SH, Type *Ty, BasicBlock::iterator I)
Insert code to directly compute the specified SCEV expression into the program.
Definition ScalarEvolutionExpander.cpp:1511
LLVM_ABI Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
Definition ScalarEvolutionExpander.cpp:2271
LLVM_ABI Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
Definition ScalarEvolutionExpander.cpp:757
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
Definition ScalarEvolutionExpander.cpp:179
LLVM_ABI BasicBlock::iterator findInsertPointAfter(Instruction *I, Instruction *MustDominate) const
Returns a suitable insert point after I, that dominates MustDominate.
Definition ScalarEvolutionExpander.cpp:153
void setInsertPoint(Instruction *IP)
Set the current insertion point.
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
This class represents a cast from a pointer to a pointer-sized integer value.
This class represents a signed maximum selection.
This class represents a signed minimum selection.
This class represents a sequential/in-order unsigned minimum selection.
This class represents a sign extension of a small integer value to a larger integer value.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned maximum selection.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
LLVM_ABI ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
The main scalar evolution driver.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind, Instruction *Inst=nullptr) const
Return the expected cost of materialization for the given integer immediate of the specified type for...
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ None
The cast is not used with a load/store of any kind.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
class_match< const SCEVConstant > m_SCEVConstant()
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
SCEVUnaryExpr_match< SCEVPtrToIntExpr, Op0_t > m_scev_PtrToInt(const Op0_t &Op0)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
class_match< const SCEV > m_SCEV()
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
auto pred_end(const MachineBasicBlock *BB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast - Return the argument parameter cast to the specified type.
constexpr from_range_t from_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
auto reverse(ContainerTy &&C)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI cl::opt< unsigned > SCEVCheapExpansionBudget
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE, bool CheckInvertible=true)
Normalize S to be post-increment for all loops present in Loops.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
PredIterator< BasicBlock, Value::user_iterator > pred_iterator
constexpr unsigned BitWidth
LLVM_ABI bool formLCSSAForInstructions(SmallVectorImpl< Instruction * > &Worklist, const DominatorTree &DT, const LoopInfo &LI, ScalarEvolution *SE, SmallVectorImpl< PHINode * > *PHIsToRemove=nullptr, SmallVectorImpl< PHINode * > *InsertedPHIs=nullptr)
Ensures LCSSA form for every instruction from the Worklist in the scope of innermost containing loop.
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast - Return the argument parameter cast to the specified type.
SmallPtrSet< const Loop *, 2 > PostIncLoopSet
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
LLVM_ABI void apply(Instruction *I)
Definition ScalarEvolutionExpander.cpp:76
LLVM_ABI PoisonFlags(const Instruction *I)
Definition ScalarEvolutionExpander.cpp:48
struct for holding enough information to help calculate the cost of the given SCEV when expanded into...
const SCEV * S
The SCEV operand to be costed.
unsigned ParentOpcode
LLVM instruction opcode that uses the operand.
int OperandIdx
The use index of an expanded instruction.
Value * visit(const SCEV *S)