LLVM: lib/Analysis/BasicAliasAnalysis.cpp Source File
58#include
59#include
60#include
61#include
62#include
63
64#define DEBUG_TYPE "basicaa"
65
66using namespace llvm;
67
68
71
74
75
76
77
78STATISTIC(SearchLimitReached, "Number of times the limit to "
79 "decompose GEPs is reached");
80STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
81
82
83
85
88
89
90
91
94 return true;
95
96
97 return false;
98}
99
100
101
102
103
104
108 bool NullIsValidLoc,
109 bool RoundToAlign = false) {
116 return std::nullopt;
117}
118
119
120
121
125 bool NullIsValidLoc) {
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
147 return false;
148
149
150
151 std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
152 true);
153
155}
156
157
158
159
163 bool NullIsValidLoc) {
164
165
166
167
168 bool CanBeNull, CanBeFreed;
170 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
171 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
172
173
177}
178
179
182 std::optional<TypeSize> ObjectSize =
184 return ObjectSize && *ObjectSize == Size;
185}
186
187
191}
192
193
194
195
196
198
201 bool OrAt) {
203}
204
209 return Succs.empty() ||
211}
212
215 bool OrAt) {
217 return false;
218
219 auto Iter = EarliestEscapes.insert({Object, nullptr});
220 if (Iter.second) {
223 false, true, DT);
224 if (EarliestCapture)
225 Inst2Obj[EarliestCapture].push_back(Object);
226 Iter.first->second = EarliestCapture;
227 }
228
229
230 if (!Iter.first->second)
231 return true;
232
233
234 if (!I)
235 return false;
236
237 if (I == Iter.first->second) {
238 if (OrAt)
239 return false;
241 }
242
244}
245
247 auto Iter = Inst2Obj.find(I);
248 if (Iter != Inst2Obj.end()) {
249 for (const Value *Obj : Iter->second)
250 EarliestEscapes.erase(Obj);
251 Inst2Obj.erase(I);
252 }
253}
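// Illustrative sketch (hypothetical IR, not from this file): if the only capture
// of a local object `%a = alloca i32` is a later `call void @take(ptr %a)`, that
// call is cached above as the earliest escape of %a. isNotCapturedBefore(%a, I, OrAt)
// then returns true for any instruction I that the capturing call cannot reach,
// and for the call itself when OrAt is false; removeInstruction() drops the cached
// entry if the capturing instruction is erased.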
254
255
256
257
258
259namespace {
260
261struct CastedValue {
263 unsigned ZExtBits = 0;
264 unsigned SExtBits = 0;
265 unsigned TruncBits = 0;
266
267 bool IsNonNegative = false;
268
269 explicit CastedValue(const Value *V) : V(V) {}
270 explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
271 unsigned TruncBits, bool IsNonNegative)
272 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
273 IsNonNegative(IsNonNegative) {}
274
276 return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
277 SExtBits;
278 }
279
280 CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
281 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
282 IsNonNegative && PreserveNonNeg);
283 }
284
285
286 CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
287 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
289 if (ExtendBy <= TruncBits)
290
291
292 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
293 IsNonNegative);
294
295
296 ExtendBy -= TruncBits;
297
298
299
300
301 return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
302 ZExtNonNegative);
303 }
304
305
306 CastedValue withSExtOfValue(const Value *NewV) const {
307 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
309 if (ExtendBy <= TruncBits)
310
311
312 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
313 IsNonNegative);
314
315
316 ExtendBy -= TruncBits;
317
318
319 return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
320 }
321
323 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
324 "Incompatible bit width");
325 if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
326 if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
327 if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
328 return N;
329 }
330
332 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
333 "Incompatible bit width");
334 if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
334 if (IsNonNegative && !N.isAllNonNegative())
339 if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
340 if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
341 return N;
342 }
343
344 bool canDistributeOver(bool NUW, bool NSW) const {
345
346
347
348 return (!ZExtBits || NUW) && (!SExtBits || NSW);
349 }
350
351 bool hasSameCastsAs(const CastedValue &Other) const {
352 if (V->getType() != Other.V->getType())
353 return false;
354
355 if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
356 TruncBits == Other.TruncBits)
357 return true;
358
359
360 if (IsNonNegative || Other.IsNonNegative)
361 return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
362 TruncBits == Other.TruncBits);
363 return false;
364 }
365};
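// Illustrative sketch (hypothetical values): CastedValue models a value seen
// through a chain of trunc/zext/sext casts. For an i32 value %x that is first
// truncated to i16 and then sign-extended to i64, this is roughly
// CastedValue(%x, /*ZExtBits=*/0, /*SExtBits=*/48, /*TruncBits=*/16):
// getBitWidth() is 32 - 16 + 48 = 64, and evaluateWith(N) applies the same
// trunc-then-sext sequence to a 32-bit constant N.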
366
367
368struct LinearExpression {
369 CastedValue Val;
372
373
374 bool IsNUW;
375
376 bool IsNSW;
377
378 LinearExpression(const CastedValue &Val, const APInt &Scale,
379 const APInt &Offset, bool IsNUW, bool IsNSW)
380 : Val(Val), Scale(Scale), Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}
381
382 LinearExpression(const CastedValue &Val)
383 : Val(Val), IsNUW(true), IsNSW(true) {
384 unsigned BitWidth = Val.getBitWidth();
387 }
388
389 LinearExpression mul(const APInt &Other, bool MulIsNUW, bool MulIsNSW) const {
390
391
392 bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
393 bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
394 return LinearExpression(Val, Scale * Other, Offset * Other, NUW, NSW);
395 }
396};
397}
398
399
400
404
406 return Val;
407
408 if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
409 return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
410 Val.evaluateWith(Const->getValue()), true, true);
411
412 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
413 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
414 APInt RHS = Val.evaluateWith(RHSC->getValue());
415
416
417 bool NUW = true, NSW = true;
418 if (isa<OverflowingBinaryOperator>(BOp)) {
419 NUW &= BOp->hasNoUnsignedWrap();
420 NSW &= BOp->hasNoSignedWrap();
421 }
422 if (!Val.canDistributeOver(NUW, NSW))
423 return Val;
424
425
426
427 if (Val.TruncBits)
428 NUW = NSW = false;
429
430 LinearExpression E(Val);
431 switch (BOp->getOpcode()) {
432 default:
433
434
435 return Val;
436 case Instruction::Or:
437
438 if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
439 return Val;
440
441 [[fallthrough]];
442 case Instruction::Add: {
444 Depth + 1, AC, DT);
445 E.Offset += RHS;
446 E.IsNUW &= NUW;
447 E.IsNSW &= NSW;
448 break;
449 }
450 case Instruction::Sub: {
452 Depth + 1, AC, DT);
453 E.Offset -= RHS;
454 E.IsNUW = false;
455 E.IsNSW &= NSW;
456 break;
457 }
458 case Instruction::Mul:
460 Depth + 1, AC, DT)
461 .mul(RHS, NUW, NSW);
462 break;
463 case Instruction::Shl:
464
465
466
467
468
469 if (RHS.getLimitedValue() > Val.getBitWidth())
470 return Val;
471
473 Depth + 1, AC, DT);
474 E.Offset <<= RHS.getLimitedValue();
475 E.Scale <<= RHS.getLimitedValue();
476 E.IsNUW &= NUW;
477 E.IsNSW &= NSW;
478 break;
479 }
480 return E;
481 }
482 }
483
484 if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
486 Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
487 Depth + 1, AC, DT);
488
489 if (isa<SExtInst>(Val.V))
491 Val.withSExtOfValue(cast<SExtInst>(Val.V)->getOperand(0)),
493
494 return Val;
495}
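// Illustrative sketch (hypothetical IR): for `%t = add nsw i64 %x, 3` and
// `%i = shl nsw i64 %t, 1`, GetLinearExpression(%i) yields roughly
// {Val = %x, Scale = 2, Offset = 6, IsNSW = true}, i.e. %i == 2*%x + 6.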
496
497namespace {
498
499
500struct VariableGEPIndex {
501 CastedValue Val;
503
504
506
507
508 bool IsNSW;
509
510
511
512
513 bool IsNegated;
514
515 bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
516 if (IsNegated == Other.IsNegated)
517 return Scale == -Other.Scale;
518 return Scale == Other.Scale;
519 }
520
521 void dump() const {
523 dbgs() << "\n";
524 }
526 OS << "(V=" << Val.V->getName()
527 << ", zextbits=" << Val.ZExtBits
528 << ", sextbits=" << Val.SExtBits
529 << ", truncbits=" << Val.TruncBits
530 << ", scale=" << Scale
531 << ", nsw=" << IsNSW
532 << ", negated=" << IsNegated << ")";
533 }
534};
535}
536
537
538
540
542
544
546
548
551 dbgs() << "\n";
552 }
556 << "(DecomposedGEP Base=" << Base->getName() << ", Offset=" << Offset
557 << ", VarIndices=[";
559 if (i != 0)
560 OS << ", ";
562 }
563 OS << "])";
564 }
565};
566
567
568
569
570
571
572
573
574
576BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
578
580 SearchTimes++;
581 const Instruction *CxtI = dyn_cast<Instruction>(V);
582
583 unsigned IndexSize = DL.getIndexTypeSizeInBits(V->getType());
584 DecomposedGEP Decomposed;
585 Decomposed.Offset = APInt(IndexSize, 0);
586 do {
587
588 const Operator *Op = dyn_cast<Operator>(V);
589 if (!Op) {
590
591 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
592 if (!GA->isInterposable()) {
593 V = GA->getAliasee();
594 continue;
595 }
596 }
597 Decomposed.Base = V;
598 return Decomposed;
599 }
600
601 if (Op->getOpcode() == Instruction::BitCast ||
602 Op->getOpcode() == Instruction::AddrSpaceCast) {
603 Value *NewV = Op->getOperand(0);
604
605
606 if (DL.getIndexTypeSizeInBits(NewV->getType()) != IndexSize) {
607 Decomposed.Base = V;
608 return Decomposed;
609 }
610 V = NewV;
611 continue;
612 }
613
614 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
615 if (!GEPOp) {
616 if (const auto *PHI = dyn_cast<PHINode>(V)) {
617
618 if (PHI->getNumIncomingValues() == 1) {
619 V = PHI->getIncomingValue(0);
620 continue;
621 }
622 } else if (const auto *Call = dyn_cast<CallBase>(V)) {
623
624
625
626
627
628
629
630
631
634 continue;
635 }
636 }
637
638 Decomposed.Base = V;
639 return Decomposed;
640 }
641
642
644
646
647
652
654
655 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
656 if (FieldNo == 0)
657 continue;
658
659 Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
660 continue;
661 }
662
663
664 if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
665 if (CIdx->isZero())
666 continue;
667
668
671 Decomposed.Base = V;
672 return Decomposed;
673 }
674
675 Decomposed.Offset += AllocTypeSize.getFixedValue() *
676 CIdx->getValue().sextOrTrunc(IndexSize);
677 continue;
678 }
679
682 Decomposed.Base = V;
683 return Decomposed;
684 }
685
686
687
690 bool NonNeg = NUSW && NUW;
691 unsigned Width = Index->getType()->getIntegerBitWidth();
692 unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
693 unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
695 CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);
696
697
700 Decomposed.Offset += LE.Offset;
702 if (!LE.IsNUW)
703 Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();
704
705
706
707
708
709 for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
710 if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
711 areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
712 Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
713 Scale += Decomposed.VarIndices[i].Scale;
714
715 LE.IsNSW = LE.IsNUW = false;
716 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
717 break;
718 }
719 }
720
721 if (!!Scale) {
722 VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
723 false};
724 Decomposed.VarIndices.push_back(Entry);
725 }
726 }
727
728
730 } while (--MaxLookup);
731
732
733 Decomposed.Base = V;
734 SearchLimitReached++;
735 return Decomposed;
736}
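// Illustrative sketch (hypothetical IR): `%p = getelementptr inbounds i32, ptr %base, i64 %i`
// decomposes to roughly {Base = %base, Offset = 0, VarIndices = [{Val = %i, Scale = 4}]},
// since each i32 element contributes 4 bytes per index step.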
737
740 bool IgnoreLocals) {
741 assert(Visited.empty() && "Visited must be cleared after use!");
743
744 unsigned MaxLookup = 8;
748
749 do {
751 if (!Visited.insert(V).second)
752 continue;
753
754
755 if (IgnoreLocals && isa<AllocaInst>(V))
756 continue;
757
758
759
760
761
762
763
764 if (const Argument *Arg = dyn_cast<Argument>(V)) {
765 if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
767 continue;
768 }
769 }
770
771
772 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
773
774
775
776 if (!GV->isConstant())
778 continue;
779 }
780
781
782 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
783 Worklist.push_back(SI->getTrueValue());
784 Worklist.push_back(SI->getFalseValue());
785 continue;
786 }
787
788
789
790 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
791
792 if (PN->getNumIncomingValues() > MaxLookup)
794 append_range(Worklist, PN->incoming_values());
795 continue;
796 }
797
798
800 } while (!Worklist.empty() && --MaxLookup);
801
802
803 if (!Worklist.empty())
805
806 return Result;
807}
808
811 return II && II->getIntrinsicID() == IID;
812}
813
814
817 MemoryEffects Min = Call->getAttributes().getMemoryEffects();
818
819 if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
821
822
823 if (Call->hasReadingOperandBundles())
825 if (Call->hasClobberingOperandBundles())
827 Min &= FuncME;
828 }
829
830 return Min;
831}
832
833
834
836 switch (F->getIntrinsicID()) {
837 case Intrinsic::experimental_guard:
838 case Intrinsic::experimental_deoptimize:
839
840
843 }
844
845 return F->getMemoryEffects();
846}
847
849 unsigned ArgIdx) {
850 if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
852
853 if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
855
856 if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
858
860}
861
862#ifndef NDEBUG
864 if (const Instruction *inst = dyn_cast<Instruction>(V)) {
865 if (!inst->getParent())
866 return nullptr;
867 return inst->getParent()->getParent();
868 }
869
870 if (const Argument *arg = dyn_cast<Argument>(V))
871 return arg->getParent();
872
873 return nullptr;
874}
875
877
880
881 return !F1 || !F2 || F1 == F2;
882}
883#endif
884
889 "BasicAliasAnalysis doesn't support interprocedural queries.");
890 return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
891}
892
893
894
895
896
897
898
903 "AliasAnalysis query involving multiple functions!");
904
906
907
908
909
910
911
912 if (isa<AllocaInst>(Object))
913 if (const CallInst *CI = dyn_cast<CallInst>(Call))
914 if (CI->isTailCall() &&
915 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
917
918
919
920 if (auto *AI = dyn_cast<AllocaInst>(Object))
921 if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
923
924
925
926
927
928
929
930
931
932
933
934 if (!isa<Constant>(Object) && Call != Object &&
936 (isa<AllocaInst>(Object) || !Call->hasFnAttr(Attribute::ReturnsTwice))) {
937
938
939
941
942 unsigned OperandNo = 0;
943 for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
944 CI != CE; ++CI, ++OperandNo) {
945 if (!(*CI)->getType()->isPointerTy())
946 continue;
947
948
949
950 if (Call->doesNotAccessMemory(OperandNo))
951 continue;
952
953
954
958
960 continue;
961
962
963 if (Call->onlyReadsMemory(OperandNo)) {
965 continue;
966 }
967
968 if (Call->onlyWritesMemory(OperandNo)) {
970 continue;
971 }
972
973
974
976 break;
977 }
978
979
981 return Result;
982 }
983
984
985
986
987
988
989
991
992
996 }
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1023
1024
1026}
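// Illustrative sketch (hypothetical IR): for `call void @f(ptr readonly %p)` where
// %p is the only argument aliasing a non-escaping local alloca underlying Loc, the
// argument scan above accumulates roughly ModRefInfo::Ref, since the aliasing
// operand may read but cannot write the location.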
1027
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042 if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1046
1047 if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1051
1052
1054}
1055
1056
1057
1058
1059
1060
1061
1066 auto BaseObjectsAlias = [&]() {
1072 };
1073
1075
1076
1077 if (!isa<GEPOperator>(V2))
1079
1080
1081
1082 return BaseObjectsAlias();
1083 }
1084
1086 DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1087 DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1088
1089
1090 if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1092
1093
1094 if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
1095 return BaseObjectsAlias();
1096
1097
1098 if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
1099 std::swap(DecompGEP1, DecompGEP2);
1101 std::swap(UnderlyingV1, UnderlyingV2);
1102 }
1103
1104
1105
1106 subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);
1107
1108
1109
1110
1111
1112
1113 if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
1115 DecompGEP1.Offset.sge(V2Size.getValue()) &&
1118
1119
1120 if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
1122 DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1125
1126
1127
1128 if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1131
1132
1136
1137
1138
1142 return BaseAlias;
1143 }
1144
1145
1146
1147
1148
1149 if (DecompGEP1.VarIndices.empty()) {
1150 APInt &Off = DecompGEP1.Offset;
1151
1152
1155 const bool Swapped = Off.isNegative();
1156
1157 if (Swapped) {
1158
1159
1160
1161
1162
1163
1164 std::swap(VLeftSize, VRightSize);
1166 }
1167
1170
1173 if (Off.ult(LSize)) {
1174
1175
1178 Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
1179
1180
1181
1183 AR.swap(Swapped);
1184 }
1185 return AR;
1186 }
1188 } else {
1189
1191 bool Overflow;
1194 if (!Overflow && Off.uge(UpperRange))
1196 }
1197 }
1198
1199
1200
1201
1202 if (DecompGEP1.VarIndices.size() == 1 &&
1203 DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
1204 DecompGEP1.Offset.isZero() &&
1207 const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
1209 ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
1211
1212
1213
1214 bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
1215 if (Overflows) {
1218 }
1219
1220 if (!Overflows) {
1221
1222
1223
1227 }
1228 }
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239 if (!DecompGEP1.VarIndices.empty() &&
1240 DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
1243
1244
1247
1248
1251
1254 for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1255 const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1257 APInt ScaleForGCD = Scale;
1258 if (!Index.IsNSW)
1259 ScaleForGCD =
1261
1262 if (i == 0)
1263 GCD = ScaleForGCD.abs();
1264 else
1266
1268 true, &AC, Index.CxtI);
1274 CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
1275
1277 "Bit widths are normalized to MaxIndexSize");
1278 if (Index.IsNSW)
1280 else
1282
1283 if (Index.IsNegated)
1284 OffsetRange = OffsetRange.sub(CR);
1285 else
1286 OffsetRange = OffsetRange.add(CR);
1287 }
1288
1289
1290
1291
1292
1293
1294
1295 APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1297 ModOffset += GCD;
1299 (GCD - ModOffset).uge(V1Size.getValue()))
1301
1302
1303
1304 unsigned BW = OffsetRange.getBitWidth();
1311
1312
1313
1314 std::optional<APInt> MinAbsVarIndex;
1315 if (DecompGEP1.VarIndices.size() == 1) {
1316
1317 const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1318 if (Var.Val.TruncBits == 0 &&
1320
1321
1322 auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1323 if (Var.IsNSW)
1324 return true;
1325
1326 int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1327
1328
1329
1330 int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1331 if (MaxScaleValueBW <= 0)
1332 return false;
1333 return Var.Scale.ule(
1335 };
1336
1337
1338 if (MultiplyByScaleNoWrap(Var)) {
1339
1340 MinAbsVarIndex = Var.Scale.abs();
1341 }
1342 }
1343 } else if (DecompGEP1.VarIndices.size() == 2) {
1344
1345
1346
1347
1348 const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1349 const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1350 if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
1352 isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, nullptr,
1353 DT))
1354 MinAbsVarIndex = Var0.Scale.abs();
1355 }
1356
1357 if (MinAbsVarIndex) {
1358
1359 APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1360 APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1361
1365 }
1366
1367 if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
1369
1370
1371
1372
1374}
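// Illustrative sketch (hypothetical IR): for `%p1 = getelementptr i8, ptr %base, i64 8`
// compared against %base itself, each accessed as a 4-byte load, the decomposed
// difference is a constant offset of 8 with no variable indices, so the
// constant-offset case above returns NoAlias (the 8-byte offset is at least the
// 4-byte access size).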
1375
1377
1379 return A;
1380
1384
1386}
1387
1388
1389
1394
1395
1396 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1397 if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
1398 AAQI)) {
1406 MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1408 }
1409
1410
1411
1416
1421}
1422
1423
1424
1430
1431
1432
1433
1434 if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1436 std::optional<AliasResult> Alias;
1441 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1442 AAQI);
1443 if (Alias)
1445 else
1446 Alias = ThisAlias;
1448 break;
1449 }
1450 return *Alias;
1451 }
1452
1454
1455
1456
1457 bool isRecursive = false;
1458 auto CheckForRecPhi = [&](Value *PV) {
1460 return false;
1462 isRecursive = true;
1463 return true;
1464 }
1465 return false;
1466 };
1467
1469 Value *OnePhi = nullptr;
1471
1472 if (PV1 == PN)
1473 continue;
1474
1475 if (isa<PHINode>(PV1)) {
1476 if (OnePhi && OnePhi != PV1) {
1477
1478
1479
1480
1481
1483 }
1484 OnePhi = PV1;
1485 }
1486
1487 if (CheckForRecPhi(PV1))
1488 continue;
1489
1490 if (UniqueSrc.insert(PV1).second)
1492 }
1493
1494 if (OnePhi && UniqueSrc.size() > 1)
1495
1496
1498
1499
1500
1501
1502 if (V1Srcs.empty())
1504
1505
1506
1507
1508 if (isRecursive)
1510
1511
1512
1514
1517
1518
1519
1522
1523
1526
1527
1528
1529 for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1531
1536 break;
1537 }
1538
1539 return Alias;
1540}
1541
1542
1543
1548
1549
1552
1553
1555 V2 = V2->stripPointerCastsForAliasAnalysis();
1556
1557
1558
1559 if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1561
1562
1563
1564
1565
1566
1567
1568 if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1570
1573
1574
1577
1578
1579
1586
1587 if (O1 != O2) {
1588
1591
1592
1593
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1608 O2, dyn_cast<Instruction>(O1), true))
1611 O1, dyn_cast<Instruction>(O2), true))
1613 }
1614
1615
1616
1620 TLI, NullIsValidLocation)) ||
1623 TLI, NullIsValidLocation)))
1625
1629 continue;
1630
1633 if (OBU.getTagName() == "separate_storage") {
1635 const Value *Hint1 = OBU.Inputs[0].get();
1636 const Value *Hint2 = OBU.Inputs[1].get();
1637
1638
1641
1643 auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
1644 if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
1646 true);
1647 }
1648 if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
1650 &*PtrA->getParent()->getEntryBlock().begin();
1652 true);
1653 }
1654 return false;
1655 };
1656
1657 if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
1658
1659
1660
1662 true)) ||
1663 ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
1665 }
1666 }
1667 }
1668 }
1669 }
1670
1671
1672
1673
1674
1675
1676
1677
1681 }
1682
1683
1684
1685
1686
1687 if (AAQI.Depth >= 512)
1689
1690
1691
1692
1693
1696 const bool Swapped = V1 > V2;
1697 if (Swapped)
1698 std::swap(Locs.first, Locs.second);
1701 if (!Pair.second) {
1702 auto &Entry = Pair.first->second;
1703 if (!Entry.isDefinitive()) {
1704
1705
1706
1708 if (Entry.isAssumption())
1709 ++Entry.NumAssumptionUses;
1710 }
1711
1713 Result.swap(Swapped);
1715 }
1716
1720 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1721
1724 auto &Entry = It->second;
1725
1726
1727 bool AssumptionDisproven =
1729 if (AssumptionDisproven)
1731
1732
1735
1736 Entry.Result.swap(Swapped);
1737
1738
1739
1740
1741 if (AssumptionDisproven)
1744
1745
1746
1751 } else {
1753 }
1754
1755
1756
1757 if (AAQI.Depth == 1) {
1758
1759
1764 }
1767 }
1769}
1770
1771AliasResult BasicAAResult::aliasCheckRecursive(
1775 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1776 AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1779 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1780 AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1784 }
1785
1786 if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1790 } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1795 }
1796
1797 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1801 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1806 }
1807
1808
1809
1810 if (O1 == O2) {
1816 }
1817
1819}
1820
1821
1822
1823
1824
1825
1826
1827bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1828 const Value *V2,
1830 if (V != V2)
1831 return false;
1832
1834 return true;
1835
1836
1837
1838 const Instruction *Inst = dyn_cast<Instruction>(V);
1839 if (!Inst || Inst->getParent()->isEntryBlock())
1840 return true;
1841
1842 return isNotInCycle(Inst, getDT(AAQI), nullptr);
1843}
1844
1845
1846void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1847 const DecomposedGEP &SrcGEP,
1849
1850
1851 if (DestGEP.Offset.ult(SrcGEP.Offset))
1852 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1853
1854 DestGEP.Offset -= SrcGEP.Offset;
1855 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1856
1857
1858 bool Found = false;
1859 for (auto I : enumerate(DestGEP.VarIndices)) {
1860 VariableGEPIndex &Dest = I.value();
1861 if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
1863 !Dest.Val.hasSameCastsAs(Src.Val))
1864 continue;
1865
1866
1867 if (Dest.IsNegated) {
1868 Dest.Scale = -Dest.Scale;
1869 Dest.IsNegated = false;
1870 Dest.IsNSW = false;
1871 }
1872
1873
1874
1875 if (Dest.Scale != Src.Scale) {
1876
1877
1878 if (Dest.Scale.ult(Src.Scale))
1879 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1880
1881 Dest.Scale -= Src.Scale;
1882 Dest.IsNSW = false;
1883 } else {
1884 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1885 }
1886 Found = true;
1887 break;
1888 }
1889
1890
1891 if (!Found) {
1892 VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
1893 true};
1894 DestGEP.VarIndices.push_back(Entry);
1895
1896
1897 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1898 }
1899 }
1900}
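// Illustrative sketch (hypothetical values): subtracting {Offset = 4, VarIndices = [%i * 4]}
// from {Offset = 16, VarIndices = [%i * 4]} cancels the matching variable index and
// leaves {Offset = 12, VarIndices = []}, i.e. a purely constant difference.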
1901
1902bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1908 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1910 return false;
1911
1914
1915 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1916
1917 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1918 !Var0.hasNegatedScaleOf(Var1) ||
1919 Var0.Val.V->getType() != Var1.Val.V->getType())
1920 return false;
1921
1922
1923
1924
1925
1926 LinearExpression E0 =
1928 LinearExpression E1 =
1930 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1931 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1932 return false;
1933
1934
1935
1936
1937
1938
1939
1940
1941 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1943 APInt MinDiffBytes =
1944 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1945
1946
1947
1948
1949
1950 return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1951 MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1952}
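// Illustrative sketch (hypothetical values): for 4-byte accesses to a[i] and a[i+1],
// the remaining indices are (i+1) and i with scales 4 and -4; MinDiff is 1, so
// MinDiffBytes is 4, which covers both access sizes and lets the heuristic conclude
// the accesses cannot overlap.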
1953
1954
1955
1956
1957
1959
1964 return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT);
1965}
1966
1969}
1970
1972
1973void BasicAAWrapperPass::anchor() {}
1974
1976 "Basic Alias Analysis (stateless AA impl)", true, true)
1982
1985}
1986
1988 auto &ACT = getAnalysis<AssumptionCacheTracker>();
1989 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1990 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1991
1993 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1994 &DTWP.getDomTree()));
1995
1996 return false;
1997}
1998
2004}