LLVM: lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp Source File
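//===----------------------------------------------------------------------===//
//
// The InductiveRangeCheckElimination (IRCE) pass splits a loop's iteration
// space so that the "main" copy of the loop runs only over iterations on which
// its inductive range checks provably pass, allowing those checks to be folded
// to true.  As a rough illustrative sketch (names and bounds are hypothetical),
// a loop of the form
//
//   for (i = 0; i < n; i++) {
//     if (0 <= i && i < len)
//       do_something();
//     else
//       throw_out_of_bounds();
//   }
//
// is rewritten into the equivalent of
//
//   limit = smin(n, len);
//   // no pre-loop is needed here because the safe range starts at 0
//   for (i = 0; i < limit; i++) {
//     if (0 <= i && i < len)   // condition folded to true in the main loop
//       do_something();
//     else
//       throw_out_of_bounds();
//   }
//   for (i = limit; i < n; i++) {
//     if (0 <= i && i < len)   // the post-loop keeps the original check
//       do_something();
//     else
//       throw_out_of_bounds();
//   }
//
//===----------------------------------------------------------------------===//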
#include
#include
#include
#include

using namespace llvm;
using namespace llvm::PatternMatch;

static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
                                        cl::init(64));

static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
                                       cl::init(false));

static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden,
                                      cl::init(false));

static cl::opt<bool> SkipProfitabilityChecks("irce-skip-profitability-checks",
                                             cl::Hidden, cl::init(false));

static cl::opt<unsigned> MinEliminatedChecks("irce-min-eliminated-checks",
                                             cl::Hidden, cl::init(10));

static cl::opt<bool> AllowUnsignedLatchCondition("irce-allow-unsigned-latch",
                                                 cl::Hidden, cl::init(true));

static cl::opt<bool> AllowNarrowLatchCondition(
    "irce-allow-narrow-latch", cl::Hidden, cl::init(true),
    cl::desc("If set to true, IRCE may eliminate wide range checks in loops "
             "with narrow latch condition."));

static cl::opt<unsigned> MaxTypeSizeForOverflowCheck(
    "irce-max-type-size-for-overflow-check", cl::Hidden, cl::init(32),
    cl::desc(
        "Maximum size of range check type for which can be produced runtime "
        "overflow check of its limit's computation"));

static cl::opt<bool>
    PrintScaledBoundaryRangeChecks("irce-print-scaled-boundary-range-checks",
                                   cl::Hidden, cl::init(false));

#define DEBUG_TYPE "irce"

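// The pass is exposed as "irce" in the new pass manager, and the cl::opt flags
// above tune it from the opt command line.  A minimal invocation, assuming an
// input file test.ll, might look like:
//
//   opt -passes=irce -irce-print-changed-loops -S test.ll
//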
namespace {

/// An InductiveRangeCheck is a condition of the form "0 <= I < Limit", where I
/// is an induction variable of the enclosing loop.  CheckUse points at the use
/// of the condition that can be folded once the check is proven redundant.
class InductiveRangeCheck {

  const SCEV *Begin = nullptr;
  const SCEV *Step = nullptr;
  const SCEV *End = nullptr;
  Use *CheckUse = nullptr;

  static bool parseRangeCheckICmp(Loop *L, ICmpInst *ICI, ScalarEvolution &SE,
                                  const SCEVAddRecExpr *&Index,
                                  const SCEV *&End);

  static void
  extractRangeChecksFromCond(Loop *L, ScalarEvolution &SE, Use &ConditionUse,
                             SmallVectorImpl<InductiveRangeCheck> &Checks,
                             SmallPtrSetImpl<Value *> &Visited);

  static bool parseIvAgaisntLimit(Loop *L, Value *LHS, Value *RHS,
                                  ICmpInst::Predicate Pred, ScalarEvolution &SE,
                                  const SCEVAddRecExpr *&Index,
                                  const SCEV *&End);

  static bool reassociateSubLHS(Loop *L, Value *VariantLHS, Value *InvariantRHS,
                                ICmpInst::Predicate Pred, ScalarEvolution &SE,
                                const SCEVAddRecExpr *&Index, const SCEV *&End);

public:
  const SCEV *getBegin() const { return Begin; }
  const SCEV *getStep() const { return Step; }
  const SCEV *getEnd() const { return End; }

  void print(raw_ostream &OS) const {
    OS << "InductiveRangeCheck:\n";
    OS << " Begin: ";
    Begin->print(OS);
    OS << " Step: ";
    Step->print(OS);
    OS << " End: ";
    End->print(OS);
    OS << "\n CheckUse: ";
    getCheckUse()->getUser()->print(OS);
    OS << " Operand: " << getCheckUse()->getOperandNo() << "\n";
  }

  LLVM_DUMP_METHOD
  void dump() {
    print(dbgs());
  }

  Use *getCheckUse() const { return CheckUse; }

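  // Illustrative only: a check recognized by this class typically comes from
  // IR of the following shape (value names are hypothetical):
  //
  //   %in.bounds = icmp slt i32 %iv, %len
  //   br i1 %in.bounds, label %ok, label %out.of.bounds
  //
  // where %iv is an affine add-recurrence of the current loop and %len is loop
  // invariant.  CheckUse then points at the use of %in.bounds in the branch,
  // which is the operand that gets folded once the check is proven redundant.
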
  /// Represents a signed integer range [Range.getBegin(), Range.getEnd()).  If
  /// getEnd() <= getBegin(), the range is considered empty.
  class Range {
    const SCEV *Begin;
    const SCEV *End;

  public:
    Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) {
      assert(Begin->getType() == End->getType() && "ill-typed range!");
    }

    Type *getType() const { return Begin->getType(); }
    const SCEV *getBegin() const { return Begin; }
    const SCEV *getEnd() const { return End; }
    bool isEmpty(ScalarEvolution &SE, bool IsSigned) const {
      if (Begin == End)
        return true;
      if (IsSigned)
        return SE.isKnownPredicate(ICmpInst::ICMP_SGE, Begin, End);
      else
        return SE.isKnownPredicate(ICmpInst::ICMP_UGE, Begin, End);
    }
  };

  /// This is the value the condition folds to when the range check is known to
  /// always pass.
  bool getPassingDirection() { return true; }

  /// Computes a range for the induction variable (IndVar) in which the range
  /// check is redundant and can be constant-folded away.  The induction
  /// variable is not required to be the canonical {0,+,1} induction variable.
  std::optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
                                                 const SCEVAddRecExpr *IndVar,
                                                 bool IsLatchSigned) const;

  /// Parse out a set of inductive range checks from \p BI and append them to
  /// \p Checks.
  ///
  /// NB! There may be conditions feeding into \p BI that are not inductive
  /// range checks; those are simply not collected.
  static void extractRangeChecksFromBranch(
      BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo *BPI,
      std::optional<uint64_t> EstimatedTripCount,
      SmallVectorImpl<InductiveRangeCheck> &Checks, bool &Changed);
};

/// Glue class that drives the transformation: it collects the range checks in
/// a loop, computes their combined safe iteration space, and runs the
/// LoopConstrainer that actually rewrites the loop.
class InductiveRangeCheckElimination {
  ScalarEvolution &SE;
  BranchProbabilityInfo *BPI;
  DominatorTree &DT;
  LoopInfo &LI;

  using GetBFIFunc = llvm::function_ref<llvm::BlockFrequencyInfo &()>;
  GetBFIFunc GetBFI;

  // Returns the estimated number of iterations of \p L based on block
  // frequency info when available, falling back to branch probability info.
  // Returns std::nullopt if the trip count cannot be estimated.
  std::optional<uint64_t> estimatedTripCount(const Loop &L);

public:
  InductiveRangeCheckElimination(ScalarEvolution &SE,
                                 BranchProbabilityInfo *BPI, DominatorTree &DT,
                                 LoopInfo &LI, GetBFIFunc GetBFI = nullptr)
      : SE(SE), BPI(BPI), DT(DT), LI(LI), GetBFI(GetBFI) {}

  bool run(Loop *L, function_ref<void(Loop *, bool)> LPMAddNewLoop);
};

} // end anonymous namespace

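// The overall flow below is: extractRangeChecksFromBranch() collects candidate
// checks from the branches of the loop, computeSafeIterationSpace() turns each
// check into a range of induction-variable values for which it passes,
// IntersectSignedRange()/IntersectUnsignedRange() combine those ranges, and
// calculateSubRanges() converts the final range into the pre-/main-/post-loop
// limits consumed by LoopConstrainer.
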
/// Parse a single comparison into a range check.  If \p ICI can be interpreted
/// as a range check on an induction variable of \p L, return true and set
/// \p Index to the induction variable and \p End to the exclusive upper limit
/// of the check; otherwise return false.
bool InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                              ScalarEvolution &SE,
                                              const SCEVAddRecExpr *&Index,
                                              const SCEV *&End) {
  auto IsLoopInvariant = [&SE, L](Value *V) {
    return SE.isLoopInvariant(SE.getSCEV(V), L);
  };

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  if (!LHS->getType()->isIntegerTy())
    return false;

  // Canonicalize to the "Index Pred Invariant" comparison.
  if (IsLoopInvariant(LHS)) {
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  } else if (!IsLoopInvariant(RHS))
    // Both LHS and RHS are loop variant.
    return false;

  if (parseIvAgaisntLimit(L, LHS, RHS, Pred, SE, Index, End))
    return true;

  if (reassociateSubLHS(L, LHS, RHS, Pred, SE, Index, End))
    return true;

  return false;
}
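
// For example, a check written as "%len > %iv" reaches this parser as
// "loop-invariant Pred variant"; the swap above turns it into "%iv < %len"
// (with the predicate swapped accordingly) before the IV-vs-limit parsers run.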

/// Try to parse a range check in the form of "IV vs Limit", where Limit is a
/// loop-invariant value and IV is an affine add-recurrence of \p L.
bool InductiveRangeCheck::parseIvAgaisntLimit(Loop *L, Value *LHS, Value *RHS,
                                              ICmpInst::Predicate Pred,
                                              ScalarEvolution &SE,
                                              const SCEVAddRecExpr *&Index,
                                              const SCEV *&End) {

  auto SIntMaxSCEV = [&](Type *T) {
    unsigned BitWidth = cast<IntegerType>(T)->getBitWidth();
    return SE.getConstant(APInt::getSignedMaxValue(BitWidth));
  };

  const auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(LHS));
  if (!AddRec)
    return false;

  // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L".
  // If more cases are added here, the code below has to stay in sync with this
  // canonical form.
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    if (match(RHS, m_ConstantInt<0>())) {
      Index = AddRec;
      End = SIntMaxSCEV(Index->getType());
      return true;
    }
    return false;

  case ICmpInst::ICMP_SGT:
    if (match(RHS, m_ConstantInt<-1>())) {
      Index = AddRec;
      End = SIntMaxSCEV(Index->getType());
      return true;
    }
    return false;

  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT:
    Index = AddRec;
    End = SE.getSCEV(RHS);
    return true;

  case ICmpInst::ICMP_SLE:
  case ICmpInst::ICMP_ULE:
    const SCEV *One = SE.getOne(RHS->getType());
    const SCEV *RHSS = SE.getSCEV(RHS);
    bool Signed = Pred == ICmpInst::ICMP_SLE;
    if (SE.willNotOverflow(Instruction::BinaryOps::Add, Signed, RHSS, One)) {
      End = SE.getAddExpr(RHSS, One);
      Index = AddRec;
      return true;
    }
    return false;
  }

  llvm_unreachable("default clause returns!");
}

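// For example, an inclusive check "iv <= n" is only accepted when "n + 1" is
// provably non-overflowing (SE.willNotOverflow above); in that case it is
// recorded as the half-open range [0, n + 1).
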
// Try to parse a range check in the form of "IV - Offset vs Limit" or
// "Offset - IV vs Limit", where Offset and Limit are loop-invariant.
bool InductiveRangeCheck::reassociateSubLHS(
    Loop *L, Value *VariantLHS, Value *InvariantRHS, ICmpInst::Predicate Pred,
    ScalarEvolution &SE, const SCEVAddRecExpr *&Index, const SCEV *&End) {
  Value *LHS, *RHS;
  if (!match(VariantLHS, m_Sub(m_Value(LHS), m_Value(RHS))))
    return false;

  const SCEV *IV = SE.getSCEV(LHS);
  const SCEV *Offset = SE.getSCEV(RHS);
  const SCEV *Limit = SE.getSCEV(InvariantRHS);

  bool OffsetSubtracted = false;
  if (SE.isLoopInvariant(IV, L))
    // "Offset - IV vs Limit"
    std::swap(IV, Offset);
  else if (SE.isLoopInvariant(Offset, L))
    // "IV - Offset vs Limit"
    OffsetSubtracted = true;
  else
    return false;

  const auto *AddRec = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AddRec)
    return false;

  // In order to turn "IV - Offset < Limit" into "IV < Limit + Offset" we must
  // be able to move Offset to the other side of the inequality without
  // changing its truth value, which is only legal if the reassociated
  // computation cannot overflow.  When overflow cannot be ruled out
  // statically, the limit is computed in a type twice as wide and an explicit
  // runtime overflow check is emitted later; this is only done for range
  // check types no wider than MaxTypeSizeForOverflowCheck bits.

  // Returns the SCEV for "LHS BinOp RHS", possibly evaluated in a twice wider
  // type, or nullptr if the overflow-free computation cannot be expressed.
  auto getExprScaledIfOverflow = [&](Instruction::BinaryOps BinOp,
                                     const SCEV *LHS,
                                     const SCEV *RHS) -> const SCEV * {
    const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
                                              SCEV::NoWrapFlags, unsigned);
    switch (BinOp) {
    default:
      llvm_unreachable("Unsupported binary op");
    case Instruction::Add:
      Operation = &ScalarEvolution::getAddExpr;
      break;
    case Instruction::Sub:
      Operation = &ScalarEvolution::getMinusSCEV;
      break;
    }

    if (SE.willNotOverflow(BinOp, ICmpInst::isSigned(Pred), LHS, RHS,
                           cast<Instruction>(VariantLHS)))
      return (SE.*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0);

    // We couldn't prove that the expression does not overflow.  Evaluate it in
    // a wider type so the overflow can be checked at runtime, but only if that
    // check is cheap enough.
    auto *Ty = cast<IntegerType>(LHS->getType());
    if (Ty->getBitWidth() > MaxTypeSizeForOverflowCheck)
      return nullptr;

    auto WideTy = IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2);
    return (SE.*Operation)(SE.getSignExtendExpr(LHS, WideTy),
                           SE.getSignExtendExpr(RHS, WideTy), SCEV::FlagAnyWrap,
                           0);
  };

  if (OffsetSubtracted)
    // "IV - Offset vs Limit" -> "IV vs Offset + Limit"
    Limit = getExprScaledIfOverflow(Instruction::BinaryOps::Add, Offset, Limit);
  else {
    // "Offset - IV vs Limit" -> "IV vs Offset - Limit" (swapped predicate)
    Limit = getExprScaledIfOverflow(Instruction::BinaryOps::Sub, Offset, Limit);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
    // "IV <= Limit" is treated as "IV < Limit + 1".
    if (Pred == ICmpInst::ICMP_SLE && Limit)
      Limit = getExprScaledIfOverflow(Instruction::BinaryOps::Add, Limit,
                                      SE.getOne(Limit->getType()));
    if (Limit) {
      Index = AddRec;
      End = Limit;
      return true;
    }
  }
  return false;
}

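// For example (illustrative values), "iv - 4 s< limit" is reassociated to
// "iv s< limit + 4".  When "limit + 4" cannot be proven free of signed
// overflow, the sum is formed in a type twice as wide instead, and
// computeSafeIterationSpace() later emits the matching runtime overflow check,
// provided the original type is at most MaxTypeSizeForOverflowCheck bits.
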
void InductiveRangeCheck::extractRangeChecksFromCond(
    Loop *L, ScalarEvolution &SE, Use &ConditionUse,
    SmallVectorImpl<InductiveRangeCheck> &Checks,
    SmallPtrSetImpl<Value *> &Visited) {
  Value *Condition = ConditionUse.get();
  if (!Visited.insert(Condition).second)
    return;

  if (match(Condition, m_LogicalAnd(m_Value(), m_Value()))) {
    extractRangeChecksFromCond(L, SE,
                               cast<Instruction>(Condition)->getOperandUse(0),
                               Checks, Visited);
    extractRangeChecksFromCond(L, SE,
                               cast<Instruction>(Condition)->getOperandUse(1),
                               Checks, Visited);
    return;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(Condition);
  if (!ICI)
    return;

  const SCEV *End = nullptr;
  const SCEVAddRecExpr *IndexAddRec = nullptr;
  if (!parseRangeCheckICmp(L, ICI, SE, IndexAddRec, End))
    return;

  assert(IndexAddRec && "IndexAddRec was not computed");
  assert(End && "End was not computed");

  if ((IndexAddRec->getLoop() != L) || !IndexAddRec->isAffine())
    return;

  InductiveRangeCheck IRC;
  IRC.End = End;
  IRC.Begin = IndexAddRec->getStart();
  IRC.Step = IndexAddRec->getStepRecurrence(SE);
  IRC.CheckUse = &ConditionUse;
  Checks.push_back(IRC);
}

void InductiveRangeCheck::extractRangeChecksFromBranch(
    BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo *BPI,
    std::optional<uint64_t> EstimatedTripCount,
    SmallVectorImpl<InductiveRangeCheck> &Checks, bool &Changed) {
  if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
    return;

  unsigned IndexLoopSucc = L->contains(BI->getSuccessor(0)) ? 0 : 1;
  assert(L->contains(BI->getSuccessor(IndexLoopSucc)) &&
         "No edges coming to loop?");

  if (!SkipProfitabilityChecks && BPI) {
    auto SuccessProbability =
        BPI->getEdgeProbability(BI->getParent(), IndexLoopSucc);
    if (EstimatedTripCount) {
      auto EstimatedEliminatedChecks =
          SuccessProbability.scale(*EstimatedTripCount);
      if (EstimatedEliminatedChecks < MinEliminatedChecks) {
        LLVM_DEBUG(dbgs() << "irce: could not prove profitability for branch "
                          << *BI << ": "
                          << "estimated eliminated checks too low "
                          << EstimatedEliminatedChecks << "\n";);
        return;
      }
    } else {
      BranchProbability LikelyTaken(15, 16);
      if (SuccessProbability < LikelyTaken) {
        LLVM_DEBUG(dbgs() << "irce: could not prove profitability for branch "
                          << *BI << ": "
                          << "could not estimate trip count "
                          << "and branch success probability too low "
                          << SuccessProbability << "\n";);
        return;
      }
    }
  }

  // IRCE expects the branch's true edge to stay in the loop; invert the branch
  // for the opposite case.
  if (IndexLoopSucc != 0) {
    IRBuilder<> Builder(BI);
    InvertBranch(BI, Builder);
    if (BPI)
      BPI->swapSuccEdgesProbabilities(BI->getParent());
    Changed = true;
  }

  SmallPtrSet<Value *, 8> Visited;
  InductiveRangeCheck::extractRangeChecksFromCond(L, SE, BI->getOperandUse(0),
                                                  Checks, Visited);
}

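// Profitability in numbers: with the default -irce-min-eliminated-checks=10, a
// check guarded by a branch taken with probability p in a loop with an
// estimated trip count T is collected for elimination only if p * T >= 10;
// without a trip-count estimate, the fallback requirement is p >= 15/16.
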
/// If the type of \p S matches with \p Ty, return \p S.  Otherwise, return the
/// signed or zero extension of \p S to \p Ty.
static const SCEV *NoopOrExtend(const SCEV *S, Type *Ty, ScalarEvolution &SE,
                                bool Signed) {
  return Signed ? SE.getNoopOrSignExtend(S, Ty) : SE.getNoopOrZeroExtend(S, Ty);
}

// Compute a safe set of limits for the main loop to run in -- effectively the
// intersection of `Range' and the iteration space of the original loop.
// Return std::nullopt if unable to compute the set of subranges.
static std::optional<LoopConstrainer::SubRanges>
calculateSubRanges(ScalarEvolution &SE, const Loop &L,
                   InductiveRangeCheck::Range &Range,
                   const LoopStructure &MainLoopStructure) {
  auto *RTy = cast<IntegerType>(Range.getType());
  // We only support wide range checks and narrow latches.
  if (!AllowNarrowLatchCondition && RTy != MainLoopStructure.ExitCountTy)
    return std::nullopt;
  if (RTy->getBitWidth() < MainLoopStructure.ExitCountTy->getBitWidth())
    return std::nullopt;

  LoopConstrainer::SubRanges Result;

  bool IsSignedPredicate = MainLoopStructure.IsSignedPredicate;
  // Bring the loop bounds into the range check type, extending as signed or
  // unsigned to match the latch predicate.
  const SCEV *Start = NoopOrExtend(SE.getSCEV(MainLoopStructure.IndVarStart),
                                   RTy, SE, IsSignedPredicate);
  const SCEV *End = NoopOrExtend(SE.getSCEV(MainLoopStructure.LoopExitAt), RTy,
                                 SE, IsSignedPredicate);

  bool Increasing = MainLoopStructure.IndVarIncreasing;

  // We compute `Smallest` and `Greatest` such that [Smallest, Greatest) is the
  // range of values the induction variable takes, and `GreatestSeen` is the
  // largest value the induction variable actually takes.
  const SCEV *Smallest = nullptr, *Greatest = nullptr, *GreatestSeen = nullptr;

  const SCEV *One = SE.getOne(RTy);
  if (Increasing) {
    Smallest = Start;
    Greatest = End;
    // No overflow, because the range [Smallest, GreatestSeen] is not empty.
    GreatestSeen = SE.getMinusSCEV(End, One);
  } else {
    // For a decreasing induction variable, the values taken are
    // [End + 1, Start + 1), and the largest value seen is Start itself.
    Smallest = SE.getAddExpr(End, One);
    Greatest = SE.getAddExpr(Start, One);
    GreatestSeen = Start;
  }

  auto Clamp = [&SE, Smallest, Greatest, IsSignedPredicate](const SCEV *S) {
    return IsSignedPredicate
               ? SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S))
               : SE.getUMaxExpr(Smallest, SE.getUMinExpr(Greatest, S));
  };

  // In some cases we can prove that we don't need a pre or post loop.
  ICmpInst::Predicate PredLE =
      IsSignedPredicate ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate PredLT =
      IsSignedPredicate ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;

  bool ProvablyNoPreloop =
      SE.isKnownPredicate(PredLE, Range.getBegin(), Smallest);
  if (!ProvablyNoPreloop)
    Result.LowLimit = Clamp(Range.getBegin());

  bool ProvablyNoPostLoop =
      SE.isKnownPredicate(PredLT, GreatestSeen, Range.getEnd());
  if (!ProvablyNoPostLoop)
    Result.HighLimit = Clamp(Range.getEnd());

  return Result;
}

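// Worked example (illustrative numbers): if the loop iterates i over [0, 100)
// and the combined safe range is [10, 50), then Result.LowLimit = 10 and
// Result.HighLimit = 50, so LoopConstrainer will produce a pre-loop for
// [0, 10), a main loop for [10, 50) with the checks folded, and a post-loop
// for [50, 100).  When the safe range already covers an end of the iteration
// space, the corresponding limit stays unset and that copy of the loop is not
// emitted.
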
/// Computes the range, in the type of the range check, in which the induction
/// variable \p IndVar makes this range check redundant.
std::optional<InductiveRangeCheck::Range>
InductiveRangeCheck::computeSafeIterationSpace(ScalarEvolution &SE,
                                               const SCEVAddRecExpr *IndVar,
                                               bool IsLatchSigned) const {
  // We can deal with the types of the latch check and the range check not
  // matching, as long as the latch check is the narrower one.
  auto *IVType = dyn_cast<IntegerType>(IndVar->getType());
  auto *RCType = dyn_cast<IntegerType>(getBegin()->getType());
  // Do not work with pointer types.
  if (!IVType || !RCType)
    return std::nullopt;
  if (IVType->getBitWidth() > RCType->getBitWidth())
    return std::nullopt;

  // IndVar is of the form "A + B * I" (where "I" is the canonical induction
  // variable) and the value being range checked is "C + D * I" ("C" is
  // getBegin() and "D" is getStep()).  With D == B, the check
  // "0 <= C + D * I < End" holds exactly when IndVar is in [0 - M, End - M),
  // where M = C - A.  The subtractions below are clamped to the borders of the
  // iteration space so that they cannot overflow.

  if (!IndVar->isAffine())
    return std::nullopt;

  const SCEV *A = NoopOrExtend(IndVar->getStart(), RCType, SE, IsLatchSigned);
  const SCEVConstant *B = dyn_cast<SCEVConstant>(
      NoopOrExtend(IndVar->getStepRecurrence(SE), RCType, SE, IsLatchSigned));
  if (!B)
    return std::nullopt;
  assert(!B->isZero() && "Recurrence with zero step?");

  const SCEV *C = getBegin();
  const SCEVConstant *D = dyn_cast<SCEVConstant>(getStep());
  if (D != B)
    return std::nullopt;

  assert(!D->getValue()->isZero() && "Recurrence with zero step?");
  unsigned BitWidth = RCType->getBitWidth();
  const SCEV *SIntMax = SE.getConstant(APInt::getSignedMaxValue(BitWidth));
  const SCEV *SIntMin = SE.getConstant(APInt::getSignedMinValue(BitWidth));

  // Subtract Y from X so that the result does not cross the border of the IV
  // iteration space; mathematically,
  //
  //   ClampedSubtract(X, Y) = min(max(X - Y, INT_MIN), INT_MAX)
  //
  // where INT_MIN/INT_MAX are the signed or unsigned bounds, depending on the
  // type of latch condition that defines the IV iteration space.
  auto ClampedSubtract = [&](const SCEV *X, const SCEV *Y) {
    // The implementation assumes that X is in [0, SINT_MAX]; the two call
    // sites below guarantee this via the non-negativity runtime check.
    if (IsLatchSigned) {
      // X is signed, Y is interpreted as signed.  The only border we can cross
      // is SINT_MIN, so subtract smax(Y, X - SINT_MAX) instead of Y.
      const SCEV *XMinusSIntMax = SE.getMinusSCEV(X, SIntMax);
      return SE.getMinusSCEV(X, SE.getSMaxExpr(Y, XMinusSIntMax),
                             SCEV::FlagNSW);
    } else
      // X is unsigned, Y is interpreted as signed.  The only border we can
      // cross is zero, so subtract smin(X, Y) instead of Y.
      return SE.getMinusSCEV(X, SE.getSMinExpr(X, Y), SCEV::FlagNUW);
  };
  const SCEV *M = SE.getMinusSCEV(C, A);
  const SCEV *Zero = SE.getZero(M->getType());

  // This function returns a SCEV equal to 1 if X is non-negative, 0 otherwise.
  auto SCEVCheckNonNegative = [&](const SCEV *X) {
    const Loop *L = IndVar->getLoop();
    const SCEV *Zero = SE.getZero(X->getType());
    const SCEV *One = SE.getOne(X->getType());
    // Can we trivially prove that X is non-negative or negative?
    if (isKnownNonNegativeInLoop(X, L, SE))
      return One;
    else if (isKnownNegativeInLoop(X, L, SE))
      return Zero;
    // Otherwise compute it at runtime: smax(smin(X, 0), -1) + 1 is 1 when
    // X >= 0 and 0 when X < 0.
    const SCEV *NegOne = SE.getNegativeSCEV(One);
    return SE.getAddExpr(SE.getSMaxExpr(SE.getSMinExpr(X, Zero), NegOne), One);
  };

  // This function returns a SCEV equal to 1 if X will not overflow in terms of
  // the range check type, 0 otherwise.
  auto SCEVCheckWillNotOverflow = [&](const SCEV *X) {
    // X doesn't overflow if SINT_MAX >= X, i.e. if (SINT_MAX - X) >= 0.
    const SCEV *SIntMaxExt = SE.getSignExtendExpr(SIntMax, X->getType());
    const SCEV *OverflowCheck =
        SCEVCheckNonNegative(SE.getMinusSCEV(SIntMaxExt, X));

    // X doesn't underflow if X >= SINT_MIN, i.e. if (X - SINT_MIN) >= 0.
    const SCEV *SIntMinExt = SE.getSignExtendExpr(SIntMin, X->getType());
    const SCEV *UnderflowCheck =
        SCEVCheckNonNegative(SE.getMinusSCEV(X, SIntMinExt));

    return SE.getMulExpr(OverflowCheck, UnderflowCheck);
  };

  // ClampedSubtract assumes a non-negative X.  For X = 0 this is trivially
  // true; for X = getEnd() we multiply both ends of the safe range by a 0/1
  // runtime check so that a negative End collapses the range to [0, 0).
  const SCEV *REnd = getEnd();
  const SCEV *EndWillNotOverflow = SE.getOne(RCType);

  auto PrintRangeCheck = [&](raw_ostream &OS) {
    auto L = IndVar->getLoop();
    OS << "irce: in function ";
    OS << L->getHeader()->getParent()->getName();
    OS << ", in ";
    L->print(OS);
    OS << "there is range check with scaled boundary:\n";
    print(OS);
  };

  auto *EndType = cast<IntegerType>(REnd->getType());
  if (EndType->getBitWidth() > RCType->getBitWidth()) {
    assert(EndType->getBitWidth() == RCType->getBitWidth() * 2);
    if (PrintScaledBoundaryRangeChecks)
      PrintRangeCheck(errs());
    // End is computed in a widened type but will be truncated to RCType, so we
    // need a runtime check that the truncation does not change its value.
    EndWillNotOverflow =
        SE.getTruncateExpr(SCEVCheckWillNotOverflow(REnd), RCType);
    REnd = SE.getTruncateExpr(REnd, RCType);
  }

  const SCEV *RuntimeChecks =
      SE.getMulExpr(SCEVCheckNonNegative(REnd), EndWillNotOverflow);
  const SCEV *Begin = SE.getMulExpr(ClampedSubtract(Zero, M), RuntimeChecks);
  const SCEV *End = SE.getMulExpr(ClampedSubtract(REnd, M), RuntimeChecks);

  return InductiveRangeCheck::Range(Begin, End);
}

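// Worked example (illustrative): for an induction variable {0,+,1} and a range
// check on the value {2,+,1} (i.e. "0 <= iv + 2 < len"), we get M = 2, so the
// safe iteration space evaluates to roughly [0 - 2, len - 2) = [-2, len - 2),
// clamped as described above; the check is redundant exactly when iv stays in
// that range.
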
static std::optional<InductiveRangeCheck::Range>
IntersectSignedRange(ScalarEvolution &SE,
                     const std::optional<InductiveRangeCheck::Range> &R1,
                     const InductiveRangeCheck::Range &R2) {
  if (R2.isEmpty(SE, /* IsSigned */ true))
    return std::nullopt;
  if (!R1)
    return R2;
  auto &R1Value = *R1;
  // We never return empty ranges from this function, and R1 is supposed to be
  // a result of intersection, so it cannot be empty here.
  assert(!R1Value.isEmpty(SE, /* IsSigned */ true) &&
         "We should never have empty R1!");

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return std::nullopt;

  const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());

  // If the resulting range is empty, return std::nullopt.
  auto Ret = InductiveRangeCheck::Range(NewBegin, NewEnd);
  if (Ret.isEmpty(SE, /* IsSigned */ true))
    return std::nullopt;
  return Ret;
}

static std::optional<InductiveRangeCheck::Range>
IntersectUnsignedRange(ScalarEvolution &SE,
                       const std::optional<InductiveRangeCheck::Range> &R1,
                       const InductiveRangeCheck::Range &R2) {
  if (R2.isEmpty(SE, /* IsSigned */ false))
    return std::nullopt;
  if (!R1)
    return R2;
  auto &R1Value = *R1;
  // We never return empty ranges from this function, and R1 is supposed to be
  // a result of intersection, so it cannot be empty here.
  assert(!R1Value.isEmpty(SE, /* IsSigned */ false) &&
         "We should never have empty R1!");

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return std::nullopt;

  const SCEV *NewBegin = SE.getUMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getUMinExpr(R1Value.getEnd(), R2.getEnd());

  // If the resulting range is empty, return std::nullopt.
  auto Ret = InductiveRangeCheck::Range(NewBegin, NewEnd);
  if (Ret.isEmpty(SE, /* IsSigned */ false))
    return std::nullopt;
  return Ret;
}

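// The two helpers above differ only in whether the intersection of the
// incoming ranges is taken with signed or unsigned min/max; e.g. intersecting
// [0, a) with [0, b) yields [0, smin(a, b)) in the signed case.
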
PreservedAnalyses IRCEPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &BPI = AM.getResult<BranchProbabilityAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);

  // There are no loops in the function.  Return early before computing the
  // remaining, more expensive analyses.
  if (LI.empty())
    return PreservedAnalyses::all();

  // Get the BFI analysis result on demand; note that any CFG modification
  // invalidates it, which is handled below.
  auto getBFI = [&F, &AM]() -> BlockFrequencyInfo & {
    return AM.getResult<BlockFrequencyAnalysis>(F);
  };
  InductiveRangeCheckElimination IRCE(SE, &BPI, DT, LI, { getBFI });

  bool Changed = false;
  {
    bool CFGChanged = false;
    for (const auto &L : LI) {
      CFGChanged |= simplifyLoop(L, &DT, &LI, &SE, nullptr, nullptr,
                                 /*PreserveLCSSA=*/false);
      Changed |= formLCSSARecursively(*L, DT, &LI, &SE);
    }
    Changed |= CFGChanged;

    if (CFGChanged && !SkipProfitabilityChecks) {
      PreservedAnalyses PA = PreservedAnalyses::all();
      PA.abandon<BlockFrequencyAnalysis>();
      AM.invalidate(F, PA);
    }
  }

  SmallPriorityWorklist<Loop *, 4> Worklist;
  appendLoopsToWorklist(LI, Worklist);
  auto LPMAddNewLoop = [&Worklist](Loop *NL, bool IsSubloop) {
    if (!IsSubloop)
      appendLoopsToWorklist(*NL, Worklist);
  };

  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (IRCE.run(L, LPMAddNewLoop)) {
      Changed = true;
      if (!SkipProfitabilityChecks) {
        PreservedAnalyses PA = PreservedAnalyses::all();
        PA.abandon<BlockFrequencyAnalysis>();
        AM.invalidate(F, PA);
      }
    }
  }

  if (!Changed)
    return PreservedAnalyses::all();
  return getLoopPassPreservedAnalyses();
}

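// estimatedTripCount() below prefers block frequencies when available: the
// estimate is the header frequency divided by the preheader frequency, which
// approximates the average number of iterations per entry into the loop.
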
std::optional<uint64_t>
InductiveRangeCheckElimination::estimatedTripCount(const Loop &L) {
  if (GetBFI) {
    BlockFrequencyInfo &BFI = GetBFI();
    uint64_t hFreq = BFI.getBlockFreq(L.getHeader()).getFrequency();
    uint64_t phFreq = BFI.getBlockFreq(L.getLoopPreheader()).getFrequency();
    if (phFreq == 0 || hFreq == 0)
      return std::nullopt;
    return {hFreq / phFreq};
  }

  if (!BPI)
    return std::nullopt;

  auto *Latch = L.getLoopLatch();
  if (!Latch)
    return std::nullopt;
  auto *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBr)
    return std::nullopt;

  auto LatchBrExitIdx = LatchBr->getSuccessor(0) == L.getHeader() ? 1 : 0;
  BranchProbability ExitProbability =
      BPI->getEdgeProbability(Latch, LatchBrExitIdx);
  if (ExitProbability.isUnknown() || ExitProbability.isZero())
    return std::nullopt;

  return ExitProbability.scaleByInverse(1);
}

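// With only branch probabilities, the estimate is the inverse of the latch
// exit probability: e.g. an exit probability of 1/16 yields an estimated trip
// count of about 16.
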
bool InductiveRangeCheckElimination::run(
    Loop *L, function_ref<void(Loop *, bool)> LPMAddNewLoop) {
  if (L->getBlocks().size() >= LoopSizeCutoff) {
    LLVM_DEBUG(dbgs() << "irce: giving up constraining loop, too large\n");
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    LLVM_DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
    return false;
  }

  auto EstimatedTripCount = estimatedTripCount(*L);
  if (!SkipProfitabilityChecks && EstimatedTripCount &&
      *EstimatedTripCount < MinEliminatedChecks) {
    LLVM_DEBUG(dbgs() << "irce: could not prove profitability: "
                      << "the estimated number of iterations is "
                      << *EstimatedTripCount << "\n");
    return false;
  }

  LLVMContext &Context = Preheader->getContext();
  SmallVector<InductiveRangeCheck, 16> RangeChecks;
  bool Changed = false;

  for (auto *BBI : L->getBlocks())
    if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
      InductiveRangeCheck::extractRangeChecksFromBranch(
          TBI, L, SE, BPI, EstimatedTripCount, RangeChecks, Changed);

  if (RangeChecks.empty())
    return Changed;

  auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) {
    OS << "irce: looking at loop "; L->print(OS);
    OS << "irce: loop has " << RangeChecks.size()
       << " inductive range checks: \n";
    for (InductiveRangeCheck &IRC : RangeChecks)
      IRC.print(OS);
  };

  LLVM_DEBUG(PrintRecognizedRangeChecks(dbgs()));

  if (PrintRangeChecks)
    PrintRecognizedRangeChecks(errs());

  const char *FailureReason = nullptr;
  std::optional<LoopStructure> MaybeLoopStructure =
      LoopStructure::parseLoopStructure(SE, *L, AllowUnsignedLatchCondition,
                                        FailureReason);
  if (!MaybeLoopStructure) {
    LLVM_DEBUG(dbgs() << "irce: could not parse loop structure: "
                      << FailureReason << "\n";);
    return Changed;
  }
  LoopStructure LS = *MaybeLoopStructure;
  const SCEVAddRecExpr *IndVar = cast<SCEVAddRecExpr>(
      SE.getMinusSCEV(SE.getSCEV(LS.IndVarBase), SE.getSCEV(LS.IndVarStep)));

  std::optional<InductiveRangeCheck::Range> SafeIterRange;

  SmallVector<InductiveRangeCheck, 4> RangeChecksToEliminate;
  // Basing on the type of the latch predicate, we interpret the IV iteration
  // range as a signed or an unsigned range, and use the corresponding min/max
  // functions when intersecting it with the safe ranges implied by the checks.
  auto IntersectRange =
      LS.IsSignedPredicate ? IntersectSignedRange : IntersectUnsignedRange;

  for (InductiveRangeCheck &IRC : RangeChecks) {
    auto Result = IRC.computeSafeIterationSpace(SE, IndVar,
                                                LS.IsSignedPredicate);
    if (Result) {
      auto MaybeSafeIterRange = IntersectRange(SE, SafeIterRange, *Result);
      if (MaybeSafeIterRange) {
        assert(!MaybeSafeIterRange->isEmpty(SE, LS.IsSignedPredicate) &&
               "We should never return empty ranges!");
        RangeChecksToEliminate.push_back(IRC);
        SafeIterRange = *MaybeSafeIterRange;
      }
    }
  }

  if (!SafeIterRange)
    return Changed;

  std::optional<LoopConstrainer::SubRanges> MaybeSR =
      calculateSubRanges(SE, *L, *SafeIterRange, LS);
  if (!MaybeSR) {
    LLVM_DEBUG(dbgs() << "irce: could not compute subranges\n");
    return false;
  }

  LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT,
                     SafeIterRange->getBegin()->getType(), *MaybeSR);

  if (LC.run()) {
    Changed = true;

    auto PrintConstrainedLoopInfo = [L]() {
      dbgs() << "irce: in function ";
      dbgs() << L->getHeader()->getParent()->getName() << ": ";
      dbgs() << "constrained ";
      L->print(dbgs());
    };

    LLVM_DEBUG(PrintConstrainedLoopInfo());

    if (PrintChangedLoops)
      PrintConstrainedLoopInfo();

    // Optimize away the now-redundant range checks.
    for (InductiveRangeCheck &IRC : RangeChecksToEliminate) {
      ConstantInt *FoldedRangeCheck = IRC.getPassingDirection()
                                          ? ConstantInt::getTrue(Context)
                                          : ConstantInt::getFalse(Context);
      IRC.getCheckUse()->set(FoldedRangeCheck);
    }
  }

  return Changed;
}
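
// Note that folding a range check only replaces the condition with a constant;
// removing the now-dead branch and the out-of-bounds block is left to later
// passes such as SimplifyCFG.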