LLVM: lib/CodeGen/GlobalISel/GISelValueTracking.cpp Source File
41#define DEBUG_TYPE "gisel-known-bits"
42
43using namespace llvm;
45
47
49 "Analysis for ComputingKnownBits", false, true)
50
51GISelValueTracking::GISelValueTracking(MachineFunction &MF, unsigned MaxDepth)
52 : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
53 DL(MF.getFunction().getDataLayout()), MaxDepth(MaxDepth) {}
54
55Align GISelValueTracking::computeKnownAlignment(Register R, unsigned Depth) {
56 const MachineInstr *MI = MRI.getVRegDef(R);
57 switch (MI->getOpcode()) {
58 case TargetOpcode::COPY:
59 return computeKnownAlignment(MI->getOperand(1).getReg(), Depth);
60 case TargetOpcode::G_ASSERT_ALIGN: {
61
62 return Align(MI->getOperand(2).getImm());
63 }
64 case TargetOpcode::G_FRAME_INDEX: {
65 int FrameIdx = MI->getOperand(1).getIndex();
66 return MF.getFrameInfo().getObjectAlign(FrameIdx);
67 }
68 case TargetOpcode::G_INTRINSIC:
69 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
70 case TargetOpcode::G_INTRINSIC_CONVERGENT:
71 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
72 default:
73 return TL.computeKnownAlignForTargetInstr(*this, R, MRI, Depth + 1);
74 }
75}
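// Editor's note (illustrative, not part of the original file): computeKnownAlignment
// answers "what alignment is this virtual register known to have?" by looking through
// COPY / G_ASSERT_ALIGN / G_FRAME_INDEX and otherwise deferring to the target hook.
// A hedged usage sketch, where `VT` (a GISelValueTracking instance) and `Ptr` (a
// pointer vreg) are placeholder names:
//   Align A = VT.computeKnownAlignment(Ptr);
//   if (A.value() >= 4) {
//     // Safe to emit a 4-byte-aligned access through Ptr.
//   }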
76
77KnownBits GISelValueTracking::getKnownBits(MachineInstr &MI) {
78 assert(MI.getNumExplicitDefs() == 1 &&
79 "expected single return generic instruction");
80 return getKnownBits(MI.getOperand(0).getReg());
81}
82
84 const LLT Ty = MRI.getType(R);
85
86
87
88 APInt DemandedElts =
91}
92
94 const APInt &DemandedElts,
98 return Known;
99}
100
102 LLT Ty = MRI.getType(R);
103 unsigned BitWidth = Ty.getScalarSizeInBits();
105}
106
110
114
115[[maybe_unused]] static void
116dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) {
117 dbgs() << "[" << Depth << "] Compute known bits: " << MI << "[" << Depth
118 << "] Computed for: " << MI << "[" << Depth << "] Known: 0x"
119 << toString(Known.Zero | Known.One, 16, false) << "\n"
120 << "[" << Depth << "] Zero: 0x" << toString(Known.Zero, 16, false)
121 << "\n"
122 << "[" << Depth << "] One: 0x" << toString(Known.One, 16, false)
123 << "\n";
124}
125
126
127void GISelValueTracking::computeKnownBitsMin(Register Src0, Register Src1,
129 const APInt &DemandedElts,
130 unsigned Depth) {
131
133
134
136 return;
137
138 KnownBits Known2;
140
141
143}
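// Editor's note (illustrative, not part of the original file): computeKnownBitsMin
// intersects the known bits of two sources, so only bits that are known identically in
// both operands survive. Conceptually:
//   KnownBits K0 = getKnownBits(Src0, DemandedElts, Depth);
//   KnownBits K1 = getKnownBits(Src1, DemandedElts, Depth);
//   Known = K0.intersectWith(K1);
// This is the conservative answer for instructions that may produce either operand,
// e.g. the two value operands of G_SELECT.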
144
145
146
147
158
160 const APInt &DemandedElts,
161 unsigned Depth) {
163 unsigned Opcode = MI.getOpcode();
164 LLT DstTy = MRI.getType(R);
165
166
167
168
171 return;
172 }
173
174#ifndef NDEBUG
178 "DemandedElt width should equal the fixed vector number of elements");
179 } else {
181 "DemandedElt width should be 1 for scalars or scalable vectors");
182 }
183#endif
184
187
188
189
190
191
192
193
194
195
197 return;
198
199 if (!DemandedElts)
200 return;
201
203
204 switch (Opcode) {
205 default:
206 TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI,
208 break;
209 case TargetOpcode::G_BUILD_VECTOR: {
210
214 if (!DemandedElts[I])
215 continue;
216
218
219
221
222
224 break;
225 }
226 break;
227 }
228 case TargetOpcode::G_SPLAT_VECTOR: {
231
232
234 break;
235 }
236 case TargetOpcode::COPY:
237 case TargetOpcode::G_PHI:
238 case TargetOpcode::PHI: {
241
242
243
244 assert(MI.getOperand(0).getSubReg() == 0 && "Is this code in SSA?");
245
246
247 for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
249 Register SrcReg = Src.getReg();
250 LLT SrcTy = MRI.getType(SrcReg);
251
252
253
254
255
256
257
258 if (SrcReg.isVirtual() && Src.getSubReg() == 0 &&
259 SrcTy.isValid()) {
260
261
262
263 APInt NowDemandedElts = SrcTy.isFixedVector() && !DstTy.isFixedVector()
265 : DemandedElts;
266
268 Depth + (Opcode != TargetOpcode::COPY));
271
272
274 break;
275 } else {
276
278 break;
279 }
280 }
281 break;
282 }
283 case TargetOpcode::G_CONSTANT: {
285 break;
286 }
287 case TargetOpcode::G_FRAME_INDEX: {
288 int FrameIdx = MI.getOperand(1).getIndex();
289 TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
290 break;
291 }
292 case TargetOpcode::G_SUB: {
298 break;
299 }
300 case TargetOpcode::G_XOR: {
305
306 Known ^= Known2;
307 break;
308 }
309 case TargetOpcode::G_PTR_ADD: {
311 break;
312
313 LLT Ty = MRI.getType(MI.getOperand(1).getReg());
314 if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
315 break;
316 [[fallthrough]];
317 }
318 case TargetOpcode::G_ADD: {
324 break;
325 }
326 case TargetOpcode::G_AND: {
327
332
333 Known &= Known2;
334 break;
335 }
336 case TargetOpcode::G_OR: {
337
342
343 Known |= Known2;
344 break;
345 }
346 case TargetOpcode::G_MUL: {
352 break;
353 }
354 case TargetOpcode::G_UMULH: {
360 break;
361 }
362 case TargetOpcode::G_SMULH: {
368 break;
369 }
370 case TargetOpcode::G_SELECT: {
371 computeKnownBitsMin(MI.getOperand(2).getReg(), MI.getOperand(3).getReg(),
372 Known, DemandedElts, Depth + 1);
373 break;
374 }
375 case TargetOpcode::G_SMIN: {
376
383 break;
384 }
385 case TargetOpcode::G_SMAX: {
386
393 break;
394 }
395 case TargetOpcode::G_UMIN: {
402 break;
403 }
404 case TargetOpcode::G_UMAX: {
411 break;
412 }
413 case TargetOpcode::G_FCMP:
414 case TargetOpcode::G_ICMP: {
416 break;
417 if (TL.getBooleanContents(DstTy.isVector(),
418 Opcode == TargetOpcode::G_FCMP) ==
422 break;
423 }
424 case TargetOpcode::G_SEXT: {
427
428
430 break;
431 }
432 case TargetOpcode::G_ASSERT_SEXT:
433 case TargetOpcode::G_SEXT_INREG: {
436 Known = Known.sextInReg(MI.getOperand(2).getImm());
437 break;
438 }
439 case TargetOpcode::G_ANYEXT: {
443 break;
444 }
445 case TargetOpcode::G_LOAD: {
451 break;
452 }
453 case TargetOpcode::G_SEXTLOAD:
454 case TargetOpcode::G_ZEXTLOAD: {
456 break;
461 Known = Opcode == TargetOpcode::G_SEXTLOAD
464 break;
465 }
466 case TargetOpcode::G_ASHR: {
473 break;
474 }
475 case TargetOpcode::G_LSHR: {
482 break;
483 }
484 case TargetOpcode::G_SHL: {
491 break;
492 }
493 case TargetOpcode::G_INTTOPTR:
494 case TargetOpcode::G_PTRTOINT:
496 break;
497
498 [[fallthrough]];
499 case TargetOpcode::G_ZEXT:
500 case TargetOpcode::G_TRUNC: {
501 Register SrcReg = MI.getOperand(1).getReg();
504 break;
505 }
506 case TargetOpcode::G_ASSERT_ZEXT: {
507 Register SrcReg = MI.getOperand(1).getReg();
509
510 unsigned SrcBitWidth = MI.getOperand(2).getImm();
511 assert(SrcBitWidth && "SrcBitWidth can't be zero");
513 Known.Zero |= (~InMask);
514 Known.One &= (~Known.Zero);
515 break;
516 }
517 case TargetOpcode::G_ASSERT_ALIGN: {
518 int64_t LogOfAlign = Log2_64(MI.getOperand(2).getImm());
519
520
521
522
525 break;
526 }
527 case TargetOpcode::G_MERGE_VALUES: {
528 unsigned NumOps = MI.getNumOperands();
529 unsigned OpSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
530
531 for (unsigned I = 0; I != NumOps - 1; ++I) {
534 DemandedElts, Depth + 1);
535 Known.insertBits(SrcOpKnown, I * OpSize);
536 }
537 break;
538 }
539 case TargetOpcode::G_UNMERGE_VALUES: {
540 unsigned NumOps = MI.getNumOperands();
542 LLT SrcTy = MRI.getType(SrcReg);
543
544 if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.getScalarType())
545 return;
546
547
548 unsigned DstIdx = 0;
549 for (; DstIdx != NumOps - 1 && MI.getOperand(DstIdx).getReg() != R;
550 ++DstIdx)
551 ;
552
553 APInt SubDemandedElts = DemandedElts;
554 if (SrcTy.isVector()) {
556 SubDemandedElts =
557 DemandedElts.zext(SrcTy.getNumElements()).shl(DstIdx * DstLanes);
558 }
559
562
563 if (SrcTy.isVector())
564 Known = std::move(SrcOpKnown);
565 else
567 break;
568 }
569 case TargetOpcode::G_BSWAP: {
570 Register SrcReg = MI.getOperand(1).getReg();
573 break;
574 }
575 case TargetOpcode::G_BITREVERSE: {
576 Register SrcReg = MI.getOperand(1).getReg();
579 break;
580 }
581 case TargetOpcode::G_CTPOP: {
584
585
589
590
591 break;
592 }
593 case TargetOpcode::G_UBFX: {
594 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
602 break;
603 }
604 case TargetOpcode::G_SBFX: {
605 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
615
616
620 break;
621 }
622 case TargetOpcode::G_UADDO:
623 case TargetOpcode::G_UADDE:
624 case TargetOpcode::G_SADDO:
625 case TargetOpcode::G_SADDE:
626 case TargetOpcode::G_USUBO:
627 case TargetOpcode::G_USUBE:
628 case TargetOpcode::G_SSUBO:
629 case TargetOpcode::G_SSUBE:
630 case TargetOpcode::G_UMULO:
631 case TargetOpcode::G_SMULO: {
632 if (MI.getOperand(1).getReg() == R) {
633
634
635 if (TL.getBooleanContents(DstTy.isVector(), false) ==
639 }
640 break;
641 }
642 case TargetOpcode::G_CTLZ:
643 case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
647
651 break;
652 }
653 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
657
659
660 LLT VecVT = MRI.getType(InVec);
661
663 break;
664
667
668
670 break;
671
674
675
676
678 if (ConstEltNo && ConstEltNo->ult(NumSrcElts))
679 DemandedSrcElts =
681
683 break;
684 }
685 case TargetOpcode::G_SHUFFLE_VECTOR: {
686 APInt DemandedLHS, DemandedRHS;
687
688
689 unsigned NumElts = MRI.getType(MI.getOperand(1).getReg()).getNumElements();
691 DemandedElts, DemandedLHS, DemandedRHS))
692 break;
693
694
697 if (!!DemandedLHS) {
701 }
702
704 break;
705 if (!!DemandedRHS) {
709 }
710 break;
711 }
712 case TargetOpcode::G_CONCAT_VECTORS: {
713 if (MRI.getType(MI.getOperand(0).getReg()).isScalableVector())
714 break;
715
718 unsigned NumSubVectorElts =
719 MRI.getType(MI.getOperand(1).getReg()).getNumElements();
720
722 APInt DemandedSub =
723 DemandedElts.extractBits(NumSubVectorElts, I * NumSubVectorElts);
724 if (!!DemandedSub) {
726
728 }
729
731 break;
732 }
733 break;
734 }
735 case TargetOpcode::G_ABS: {
736 Register SrcReg = MI.getOperand(1).getReg();
738 Known = Known.abs();
740 1);
741 break;
742 }
743 }
744
746}
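// Editor's note (illustrative, not part of the original file): computeKnownBitsImpl is
// the recursive worker; clients normally go through getKnownBits(). A hedged usage
// sketch from a combine, where `VT` and `Reg` are placeholder names:
//   KnownBits Known = VT.getKnownBits(Reg);
//   bool LowBitClear = Known.Zero[0];   // true if the low bit is known zero
//   APInt UMax = Known.getMaxValue();   // largest value consistent with Known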
747
749 Ty = Ty.getScalarType();
753}
754
757 unsigned Depth) {
758 LLT Ty = MRI.getType(R);
759 APInt DemandedElts =
761 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, Depth);
762}
763
764void GISelValueTracking::computeKnownFPClassForFPTrunc(
769 return;
770
771 Register Val = MI.getOperand(1).getReg();
772 KnownFPClass KnownSrc;
773 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
775
776
777
780
782
783
784}
785
786void GISelValueTracking::computeKnownFPClass(Register R,
787 const APInt &DemandedElts,
790 unsigned Depth) {
791 assert(Known.isUnknown() && "should not be called with known information");
792
793 if (!DemandedElts) {
794
796 return;
797 }
798
800
801 MachineInstr &MI = *MRI.getVRegDef(R);
802 unsigned Opcode = MI.getOpcode();
803 LLT DstTy = MRI.getType(R);
804
807 return;
808 }
809
811 switch (Cst->getKind()) {
813 auto APF = Cst->getScalarValue();
815 Known.SignBit = APF.isNegative();
816 break;
817 }
820 bool SignBitAllZero = true;
821 bool SignBitAllOne = true;
822
823 for (auto C : *Cst) {
825 if (C.isNegative())
826 SignBitAllZero = false;
827 else
828 SignBitAllOne = false;
829 }
830
831 if (SignBitAllOne != SignBitAllZero)
832 Known.SignBit = SignBitAllOne;
833
834 break;
835 }
838 break;
839 }
840 }
841
842 return;
843 }
844
847 KnownNotFromFlags |= fcNan;
849 KnownNotFromFlags |= fcInf;
850
851
852
853 InterestedClasses &= ~KnownNotFromFlags;
854
855 auto ClearClassesFromFlags =
857
858
860 return;
861
862 const MachineFunction *MF = MI.getMF();
863
864 switch (Opcode) {
865 default:
866 TL.computeKnownFPClassForTargetInstr(*this, R, Known, DemandedElts, MRI,
868 break;
869 case TargetOpcode::G_FNEG: {
870 Register Val = MI.getOperand(1).getReg();
871 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known, Depth + 1);
872 Known.fneg();
873 break;
874 }
875 case TargetOpcode::G_SELECT: {
880
883
888
893
894
895
896
897
898 bool LookThroughFAbsFNeg = CmpLHS != LHS && CmpLHS != RHS;
899 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
900 fcmpImpliesClass(Pred, *MF, CmpLHS, CmpRHS, LookThroughFAbsFNeg);
905 MaskIfTrue = TestedMask;
906 MaskIfFalse = ~TestedMask;
907 }
908
909 if (TestedValue == LHS) {
910
911 FilterLHS = MaskIfTrue;
912 } else if (TestedValue == RHS) {
913
914 FilterRHS = MaskIfFalse;
915 }
916
917 KnownFPClass Known2;
918 computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
921
922 computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS,
923 Known2, Depth + 1);
925
926 Known |= Known2;
927 break;
928 }
929 case TargetOpcode::G_FCOPYSIGN: {
930 Register Magnitude = MI.getOperand(1).getReg();
931 Register Sign = MI.getOperand(2).getReg();
932
933 KnownFPClass KnownSign;
934
935 computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
937 computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
940 break;
941 }
942 case TargetOpcode::G_FMA:
943 case TargetOpcode::G_STRICT_FMA:
944 case TargetOpcode::G_FMAD: {
946 break;
947
951
953 break;
954
955
957
958
959 KnownFPClass KnownAddend;
960 computeKnownFPClass(C, DemandedElts, InterestedClasses, KnownAddend,
962
965 break;
966 }
967 case TargetOpcode::G_FSQRT:
968 case TargetOpcode::G_STRICT_FSQRT: {
969 KnownFPClass KnownSrc;
970 FPClassTest InterestedSrcs = InterestedClasses;
971 if (InterestedClasses & fcNan)
973
974 Register Val = MI.getOperand(1).getReg();
975
976 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);
977
982
983
986
987
989 break;
990 }
991 case TargetOpcode::G_FABS: {
993 Register Val = MI.getOperand(1).getReg();
994
995
996 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
998 }
999 Known.fabs();
1000 break;
1001 }
1002 case TargetOpcode::G_FSIN:
1003 case TargetOpcode::G_FCOS:
1004 case TargetOpcode::G_FSINCOS: {
1005
1006 Register Val = MI.getOperand(1).getReg();
1007 KnownFPClass KnownSrc;
1008
1009 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1012
1015 break;
1016 }
1017 case TargetOpcode::G_FMAXNUM:
1018 case TargetOpcode::G_FMINNUM:
1019 case TargetOpcode::G_FMINNUM_IEEE:
1020 case TargetOpcode::G_FMAXIMUM:
1021 case TargetOpcode::G_FMINIMUM:
1022 case TargetOpcode::G_FMAXNUM_IEEE:
1023 case TargetOpcode::G_FMAXIMUMNUM:
1024 case TargetOpcode::G_FMINIMUMNUM: {
1027 KnownFPClass KnownLHS, KnownRHS;
1028
1029 computeKnownFPClass(LHS, DemandedElts, InterestedClasses, KnownLHS,
1031 computeKnownFPClass(RHS, DemandedElts, InterestedClasses, KnownRHS,
1033
1035 Known = KnownLHS | KnownRHS;
1036
1037
1038 if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
1039 Opcode == TargetOpcode::G_FMAXNUM ||
1040 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1041 Opcode == TargetOpcode::G_FMAXIMUMNUM))
1043
1044 if (Opcode == TargetOpcode::G_FMAXNUM ||
1045 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1046 Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
1047
1048
1054 } else if (Opcode == TargetOpcode::G_FMAXIMUM) {
1055
1056
1060 } else if (Opcode == TargetOpcode::G_FMINNUM ||
1061 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1062 Opcode == TargetOpcode::G_FMINNUM_IEEE) {
1063
1064
1070 } else if (Opcode == TargetOpcode::G_FMINIMUM) {
1071
1072
1076 } else {
1078 }
1079
1080
1081
1082
1083
1084
1085
1086
1089 DenormalMode Mode =
1093 }
1094
1100 else
1102 } else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1103 Opcode == TargetOpcode::G_FMINIMUM) ||
1104 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1105 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1106 Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
1107 Opcode == TargetOpcode::G_FMINNUM_IEEE ||
1108
1113 if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1114 Opcode == TargetOpcode::G_FMAXNUM ||
1115 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1116 Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
1117 (KnownLHS.SignBit == false || KnownRHS.SignBit == false))
1119 else if ((Opcode == TargetOpcode::G_FMINIMUM ||
1120 Opcode == TargetOpcode::G_FMINNUM ||
1121 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1122 Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
1123 (KnownLHS.SignBit == true || KnownRHS.SignBit == true))
1125 }
1126 }
1127 break;
1128 }
1129 case TargetOpcode::G_FCANONICALIZE: {
1130 Register Val = MI.getOperand(1).getReg();
1131 KnownFPClass KnownSrc;
1132 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1134
1135
1136
1137
1138
1139
1140
1142
1143
1144
1147 else
1149
1150
1151
1154 DenormalMode DenormMode = MF->getDenormalMode(FPType);
1160 break;
1161 }
1162
1165
1170
1171 break;
1172 }
1173 case TargetOpcode::G_VECREDUCE_FMAX:
1174 case TargetOpcode::G_VECREDUCE_FMIN:
1175 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1176 case TargetOpcode::G_VECREDUCE_FMINIMUM: {
1177 Register Val = MI.getOperand(1).getReg();
1178
1179
1180
1181 Known =
1182 computeKnownFPClass(Val, MI.getFlags(), InterestedClasses, Depth + 1);
1183
1186 break;
1187 }
1188 case TargetOpcode::G_TRUNC:
1189 case TargetOpcode::G_FFLOOR:
1190 case TargetOpcode::G_FCEIL:
1191 case TargetOpcode::G_FRINT:
1192 case TargetOpcode::G_FNEARBYINT:
1193 case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
1194 case TargetOpcode::G_INTRINSIC_ROUND: {
1195 Register Val = MI.getOperand(1).getReg();
1196 KnownFPClass KnownSrc;
1197 FPClassTest InterestedSrcs = InterestedClasses;
1202 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);
1203
1204
1206
1208
1209
1210
1211
1216
1217 break;
1218 }
1219 case TargetOpcode::G_FEXP:
1220 case TargetOpcode::G_FEXP2:
1221 case TargetOpcode::G_FEXP10: {
1223 if ((InterestedClasses & fcNan) == fcNone)
1224 break;
1225
1226 Register Val = MI.getOperand(1).getReg();
1227 KnownFPClass KnownSrc;
1228 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1233 }
1234
1235 break;
1236 }
1237 case TargetOpcode::G_FLOG:
1238 case TargetOpcode::G_FLOG2:
1239 case TargetOpcode::G_FLOG10: {
1240
1241
1242
1243
1245 break;
1246
1247 FPClassTest InterestedSrcs = InterestedClasses;
1250 if ((InterestedClasses & fcNan) != fcNone)
1252
1253 Register Val = MI.getOperand(1).getReg();
1254 KnownFPClass KnownSrc;
1255 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);
1256
1259
1262
1265 DenormalMode Mode = MF->getDenormalMode(FltSem);
1266
1269
1270 break;
1271 }
1272 case TargetOpcode::G_FPOWI: {
1274 break;
1275
1277 LLT ExpTy = MRI.getType(Exp);
1279 Exp, ExpTy.isVector() ? DemandedElts : APInt(1, 1), Depth + 1);
1280
1281 if (ExponentKnownBits.Zero[0]) {
1283 break;
1284 }
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294 Register Val = MI.getOperand(1).getReg();
1295 KnownFPClass KnownSrc;
1296 computeKnownFPClass(Val, DemandedElts, fcNegative, KnownSrc, Depth + 1);
1299 break;
1300 }
1301 case TargetOpcode::G_FLDEXP:
1302 case TargetOpcode::G_STRICT_FLDEXP: {
1303 Register Val = MI.getOperand(1).getReg();
1304 KnownFPClass KnownSrc;
1305 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1307 Known.propagateNaN(KnownSrc, true);
1308
1309
1314
1319
1320
1322 if ((InterestedClasses & ExpInfoMask) == fcNone)
1323 break;
1325 break;
1326
1327
1328
1329 break;
1330 }
1331 case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
1332 computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
1334 break;
1335 }
1336 case TargetOpcode::G_FADD:
1337 case TargetOpcode::G_STRICT_FADD:
1338 case TargetOpcode::G_FSUB:
1339 case TargetOpcode::G_STRICT_FSUB: {
1342 KnownFPClass KnownLHS, KnownRHS;
1343 bool WantNegative =
1344 (Opcode == TargetOpcode::G_FADD ||
1345 Opcode == TargetOpcode::G_STRICT_FADD) &&
1347 bool WantNaN = (InterestedClasses & fcNan) != fcNone;
1348 bool WantNegZero = (InterestedClasses & fcNegZero) != fcNone;
1349
1350 if (!WantNaN && !WantNegative && !WantNegZero)
1351 break;
1352
1353 FPClassTest InterestedSrcs = InterestedClasses;
1354 if (WantNegative)
1356 if (InterestedClasses & fcNan)
1357 InterestedSrcs |= fcInf;
1358 computeKnownFPClass(RHS, DemandedElts, InterestedSrcs, KnownRHS, Depth + 1);
1359
1362 WantNegZero ||
1363 (Opcode == TargetOpcode::G_FSUB ||
1364 Opcode == TargetOpcode::G_STRICT_FSUB)) {
1365
1366
1367
1368 computeKnownFPClass(LHS, DemandedElts, InterestedSrcs, KnownLHS,
1370
1371
1375
1376 if (Opcode == Instruction::FAdd) {
1380
1381
1386
1389 } else {
1390
1395
1398 }
1399 }
1400
1401 break;
1402 }
1403 case TargetOpcode::G_FMUL:
1404 case TargetOpcode::G_STRICT_FMUL: {
1407
1410
1411 if ((InterestedClasses & fcNan) != fcNan)
1412 break;
1413
1414
1416
1417 KnownFPClass KnownLHS, KnownRHS;
1418 computeKnownFPClass(RHS, DemandedElts, NeedForNan, KnownRHS, Depth + 1);
1420 break;
1421
1422 computeKnownFPClass(LHS, DemandedElts, NeedForNan, KnownLHS, Depth + 1);
1424 break;
1425
1429 else
1431 }
1432
1433
1436 break;
1437 }
1438
1446
1447 break;
1448 }
1449 case TargetOpcode::G_FDIV:
1450 case TargetOpcode::G_FREM: {
1453
1455
1456 if (Opcode == TargetOpcode::G_FDIV) {
1457
1459 } else {
1460
1462 }
1463
1464 break;
1465 }
1466
1467 const bool WantNan = (InterestedClasses & fcNan) != fcNone;
1468 const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
1469 const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
1471 if (!WantNan && !WantNegative && !WantPositive)
1472 break;
1473
1474 KnownFPClass KnownLHS, KnownRHS;
1475
1477 KnownRHS, Depth + 1);
1478
1479 bool KnowSomethingUseful =
1481
1482 if (KnowSomethingUseful || WantPositive) {
1486
1487 computeKnownFPClass(LHS, DemandedElts, InterestedClasses & InterestedLHS,
1488 KnownLHS, Depth + 1);
1489 }
1490
1491 if (Opcode == Instruction::FDiv) {
1492
1501 }
1502
1503
1504
1508 } else {
1509
1515 }
1516
1517
1522
1523
1528 }
1529
1530 break;
1531 }
1532 case TargetOpcode::G_FPEXT: {
1533 Register Dst = MI.getOperand(0).getReg();
1534 Register Src = MI.getOperand(1).getReg();
1535
1536 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, Depth + 1);
1537
1540 LLT SrcTy = MRI.getType(Src).getScalarType();
1542
1543
1550 }
1551
1552
1554 Known.SignBit = std::nullopt;
1555 break;
1556 }
1557 case TargetOpcode::G_FPTRUNC: {
1558 computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
1560 break;
1561 }
1562 case TargetOpcode::G_SITOFP:
1563 case TargetOpcode::G_UITOFP: {
1564
1566
1567
1569
1570
1572 if (Opcode == TargetOpcode::G_UITOFP)
1574
1575 Register Val = MI.getOperand(1).getReg();
1576 LLT Ty = MRI.getType(Val);
1577
1578 if (InterestedClasses & fcInf) {
1579
1580
1581
1583 if (Opcode == TargetOpcode::G_SITOFP)
1584 --IntSize;
1585
1586
1587
1592 }
1593
1594 break;
1595 }
1596
1597 case TargetOpcode::G_BUILD_VECTOR:
1598 case TargetOpcode::G_CONCAT_VECTORS: {
1600
1602 break;
1603
1604 bool First = true;
1605 for (unsigned Idx = 0; Idx < Merge.getNumSources(); ++Idx) {
1606
1607 bool NeedsElt = DemandedElts[Idx];
1608
1609
1610 if (NeedsElt) {
1613 computeKnownFPClass(Src, Known, InterestedClasses, Depth + 1);
1615 } else {
1616 KnownFPClass Known2;
1617 computeKnownFPClass(Src, Known2, InterestedClasses, Depth + 1);
1618 Known |= Known2;
1619 }
1620
1621
1623 break;
1624 }
1625 }
1626
1627 break;
1628 }
1629 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1630
1631
1632
1636
1638
1639 LLT VecTy = MRI.getType(Vec);
1640
1644 if (CIdx && CIdx->ult(NumElts))
1646 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
1648 }
1649
1650 break;
1651 }
1652 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1657
1658 LLT VecTy = MRI.getType(Vec);
1659
1661 return;
1662
1664
1665 unsigned NumElts = DemandedElts.getBitWidth();
1666 APInt DemandedVecElts = DemandedElts;
1667 bool NeedsElt = true;
1668
1669 if (CIdx && CIdx->ult(NumElts)) {
1670 DemandedVecElts.clearBit(CIdx->getZExtValue());
1671 NeedsElt = DemandedElts[CIdx->getZExtValue()];
1672 }
1673
1674
1675 if (NeedsElt) {
1676 computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1);
1677
1679 break;
1680 } else {
1682 }
1683
1684
1685 if (!DemandedVecElts.isZero()) {
1686 KnownFPClass Known2;
1687 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
1689 Known |= Known2;
1690 }
1691
1692 break;
1693 }
1694 case TargetOpcode::G_SHUFFLE_VECTOR: {
1695
1696
1698 APInt DemandedLHS, DemandedRHS;
1700 assert(DemandedElts == APInt(1, 1));
1701 DemandedLHS = DemandedRHS = DemandedElts;
1702 } else {
1704 DemandedElts, DemandedLHS,
1705 DemandedRHS)) {
1707 return;
1708 }
1709 }
1710
1711 if (!!DemandedLHS) {
1713 computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
1715
1716
1718 break;
1719 } else {
1721 }
1722
1723 if (!!DemandedRHS) {
1724 KnownFPClass Known2;
1726 computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
1728 Known |= Known2;
1729 }
1730 break;
1731 }
1732 case TargetOpcode::COPY: {
1733 Register Src = MI.getOperand(1).getReg();
1734
1735 if (!Src.isVirtual())
1736 return;
1737
1738 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1);
1739 break;
1740 }
1741 }
1742}
1743
1745GISelValueTracking::computeKnownFPClass(Register R, const APInt &DemandedElts,
1747 unsigned Depth) {
1749 computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses, Depth);
1750 return KnownClasses;
1751}
1752
1756 computeKnownFPClass(R, Known, InterestedClasses, Depth);
1757 return Known;
1758}
1759
1764 InterestedClasses &= ~fcNan;
1766 InterestedClasses &= ~fcInf;
1767
1769 computeKnownFPClass(R, DemandedElts, InterestedClasses, Depth);
1770
1772 Result.KnownFPClasses &= ~fcNan;
1774 Result.KnownFPClasses &= ~fcInf;
1775 return Result;
1776}
1777
1780 LLT Ty = MRI.getType(R);
1781 APInt DemandedElts =
1783 return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses, Depth);
1784}
1785
1786
1787unsigned GISelValueTracking::computeNumSignBitsMin(Register Src0, Register Src1,
1788 const APInt &DemandedElts,
1789 unsigned Depth) {
1790
1792 if (Src1SignBits == 1)
1793 return 1;
1795}
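// Editor's note (illustrative, not part of the original file): as with
// computeKnownBitsMin above, the conservative sign-bit count for a value that may be
// either source is the minimum of the two operands' counts, conceptually
//   std::min(computeNumSignBits(Src0, DemandedElts, Depth),
//            computeNumSignBits(Src1, DemandedElts, Depth))
// with an early exit when one operand already yields the trivial answer of 1.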
1796
1797
1798
1799
1801 unsigned TyBits) {
1803 if (!Ranges)
1804 return 1;
1805
1809 case TargetOpcode::G_SEXTLOAD:
1811 break;
1812 case TargetOpcode::G_ZEXTLOAD:
1814 break;
1815 default:
1816 break;
1817 }
1818 }
1819
1822}
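// Editor's note (illustrative, not part of the original file): for a load carrying
// !range metadata, the sign-bit count is bounded by the extremes of the range,
// conceptually:
//   ConstantRange CR = getConstantRangeFromMetadata(*Ld->getRanges());
//   // extend to TyBits with CR.signExtend / CR.zeroExtend for G_SEXTLOAD / G_ZEXTLOAD
//   unsigned Bits = std::min(CR.getSignedMin().getNumSignBits(),
//                            CR.getSignedMax().getNumSignBits());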
1823
1825 const APInt &DemandedElts,
1826 unsigned Depth) {
1828 unsigned Opcode = MI.getOpcode();
1829
1830 if (Opcode == TargetOpcode::G_CONSTANT)
1831 return MI.getOperand(1).getCImm()->getValue().getNumSignBits();
1832
1834 return 1;
1835
1836 if (!DemandedElts)
1837 return 1;
1838
1839 LLT DstTy = MRI.getType(R);
1841
1842
1843
1844
1845
1847 return 1;
1848
1849 unsigned FirstAnswer = 1;
1850 switch (Opcode) {
1851 case TargetOpcode::COPY: {
1853 if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
1854 MRI.getType(Src.getReg()).isValid()) {
1855
1857 }
1858
1859 return 1;
1860 }
1861 case TargetOpcode::G_SEXT: {
1862 Register Src = MI.getOperand(1).getReg();
1863 LLT SrcTy = MRI.getType(Src);
1864 unsigned Tmp = DstTy.getScalarSizeInBits() - SrcTy.getScalarSizeInBits();
1866 }
1867 case TargetOpcode::G_ASSERT_SEXT:
1868 case TargetOpcode::G_SEXT_INREG: {
1869
1870 Register Src = MI.getOperand(1).getReg();
1871 unsigned SrcBits = MI.getOperand(2).getImm();
1872 unsigned InRegBits = TyBits - SrcBits + 1;
1874 InRegBits);
1875 }
1876 case TargetOpcode::G_LOAD: {
1878 if (DemandedElts != 1 || !getDataLayout().isLittleEndian())
1879 break;
1880
1882 }
1883 case TargetOpcode::G_SEXTLOAD: {
1885
1886
1888 return 1;
1889
1891 if (NumBits != 1)
1892 return NumBits;
1893
1894
1897 }
1898 case TargetOpcode::G_ZEXTLOAD: {
1900
1901
1903 return 1;
1904
1906 if (NumBits != 1)
1907 return NumBits;
1908
1909
1912 }
1913 case TargetOpcode::G_AND:
1914 case TargetOpcode::G_OR:
1915 case TargetOpcode::G_XOR: {
1916 Register Src1 = MI.getOperand(1).getReg();
1917 unsigned Src1NumSignBits =
1919 if (Src1NumSignBits != 1) {
1920 Register Src2 = MI.getOperand(2).getReg();
1921 unsigned Src2NumSignBits =
1923 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
1924 }
1925 break;
1926 }
1927 case TargetOpcode::G_ASHR: {
1928 Register Src1 = MI.getOperand(1).getReg();
1929 Register Src2 = MI.getOperand(2).getReg();
1932 FirstAnswer = std::min<uint64_t>(FirstAnswer + *C, TyBits);
1933 break;
1934 }
1935 case TargetOpcode::G_SHL: {
1936 Register Src1 = MI.getOperand(1).getReg();
1937 Register Src2 = MI.getOperand(2).getReg();
1938 if (std::optional<ConstantRange> ShAmtRange =
1939 getValidShiftAmountRange(Src2, DemandedElts, Depth + 1)) {
1940 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
1941 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
1942
1943 MachineInstr &ExtMI = *MRI.getVRegDef(Src1);
1944 unsigned ExtOpc = ExtMI.getOpcode();
1945
1946
1947
1948
1949
1950
1951 if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||
1952 ExtOpc == TargetOpcode::G_ANYEXT) {
1953 LLT ExtTy = MRI.getType(Src1);
1955 LLT ExtendeeTy = MRI.getType(Extendee);
1958
1959 if (SizeDiff <= MinShAmt) {
1960 unsigned Tmp =
1962 if (MaxShAmt < Tmp)
1963 return Tmp - MaxShAmt;
1964 }
1965 }
1966
1968 if (MaxShAmt < Tmp)
1969 return Tmp - MaxShAmt;
1970 }
1971 break;
1972 }
1973 case TargetOpcode::G_TRUNC: {
1974 Register Src = MI.getOperand(1).getReg();
1975 LLT SrcTy = MRI.getType(Src);
1976
1977
1979 unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
1981 if (NumSrcSignBits > (NumSrcBits - DstTyBits))
1982 return NumSrcSignBits - (NumSrcBits - DstTyBits);
1983 break;
1984 }
1985 case TargetOpcode::G_SELECT: {
1986 return computeNumSignBitsMin(MI.getOperand(2).getReg(),
1987 MI.getOperand(3).getReg(), DemandedElts,
1989 }
1990 case TargetOpcode::G_SMIN:
1991 case TargetOpcode::G_SMAX:
1992 case TargetOpcode::G_UMIN:
1993 case TargetOpcode::G_UMAX:
1994
1995 return computeNumSignBitsMin(MI.getOperand(1).getReg(),
1996 MI.getOperand(2).getReg(), DemandedElts,
1998 case TargetOpcode::G_SADDO:
1999 case TargetOpcode::G_SADDE:
2000 case TargetOpcode::G_UADDO:
2001 case TargetOpcode::G_UADDE:
2002 case TargetOpcode::G_SSUBO:
2003 case TargetOpcode::G_SSUBE:
2004 case TargetOpcode::G_USUBO:
2005 case TargetOpcode::G_USUBE:
2006 case TargetOpcode::G_SMULO:
2007 case TargetOpcode::G_UMULO: {
2008
2009
2010
2011 if (MI.getOperand(1).getReg() == R) {
2012 if (TL.getBooleanContents(DstTy.isVector(), false) ==
2014 return TyBits;
2015 }
2016
2017 break;
2018 }
2019 case TargetOpcode::G_SUB: {
2020 Register Src2 = MI.getOperand(2).getReg();
2021 unsigned Src2NumSignBits =
2023 if (Src2NumSignBits == 1)
2024 return 1;
2025
2026
2027 Register Src1 = MI.getOperand(1).getReg();
2029 if (Known1.isZero()) {
2031
2032
2033 if ((Known2.Zero | 1).isAllOnes())
2034 return TyBits;
2035
2036
2037
2038
2040 FirstAnswer = Src2NumSignBits;
2041 break;
2042 }
2043
2044
2045 }
2046
2047 unsigned Src1NumSignBits =
2049 if (Src1NumSignBits == 1)
2050 return 1;
2051
2052
2053
2054 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2055 break;
2056 }
2057 case TargetOpcode::G_ADD: {
2058 Register Src2 = MI.getOperand(2).getReg();
2059 unsigned Src2NumSignBits =
2061 if (Src2NumSignBits <= 2)
2062 return 1;
2063
2064 Register Src1 = MI.getOperand(1).getReg();
2065 unsigned Src1NumSignBits =
2067 if (Src1NumSignBits == 1)
2068 return 1;
2069
2070
2074
2075
2076 if ((Known1.Zero | 1).isAllOnes())
2077 return TyBits;
2078
2079
2080
2082 FirstAnswer = Src1NumSignBits;
2083 break;
2084 }
2085
2086
2087 }
2088
2089
2090
2091 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2092 break;
2093 }
2094 case TargetOpcode::G_FCMP:
2095 case TargetOpcode::G_ICMP: {
2096 bool IsFP = Opcode == TargetOpcode::G_FCMP;
2097 if (TyBits == 1)
2098 break;
2099 auto BC = TL.getBooleanContents(DstTy.isVector(), IsFP);
2101 return TyBits;
2103 return TyBits - 1;
2104 break;
2105 }
2106 case TargetOpcode::G_BUILD_VECTOR: {
2107
2108 FirstAnswer = TyBits;
2109 APInt SingleDemandedElt(1, 1);
2111 if (!DemandedElts[I])
2112 continue;
2113
2114 unsigned Tmp2 =
2116 FirstAnswer = std::min(FirstAnswer, Tmp2);
2117
2118
2119 if (FirstAnswer == 1)
2120 break;
2121 }
2122 break;
2123 }
2124 case TargetOpcode::G_CONCAT_VECTORS: {
2125 if (MRI.getType(MI.getOperand(0).getReg()).isScalableVector())
2126 break;
2127 FirstAnswer = TyBits;
2128
2129
2130 unsigned NumSubVectorElts =
2131 MRI.getType(MI.getOperand(1).getReg()).getNumElements();
2133 APInt DemandedSub =
2134 DemandedElts.extractBits(NumSubVectorElts, I * NumSubVectorElts);
2135 if (!DemandedSub)
2136 continue;
2138
2139 FirstAnswer = std::min(FirstAnswer, Tmp2);
2140
2141
2142 if (FirstAnswer == 1)
2143 break;
2144 }
2145 break;
2146 }
2147 case TargetOpcode::G_SHUFFLE_VECTOR: {
2148
2149
2150 APInt DemandedLHS, DemandedRHS;
2151 Register Src1 = MI.getOperand(1).getReg();
2152 unsigned NumElts = MRI.getType(Src1).getNumElements();
2154 DemandedElts, DemandedLHS, DemandedRHS))
2155 return 1;
2156
2157 if (!!DemandedLHS)
2159
2160 if (FirstAnswer == 1)
2161 break;
2162 if (!!DemandedRHS) {
2163 unsigned Tmp2 =
2165 FirstAnswer = std::min(FirstAnswer, Tmp2);
2166 }
2167 break;
2168 }
2169 case TargetOpcode::G_SPLAT_VECTOR: {
2170
2171 Register Src = MI.getOperand(1).getReg();
2173 unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();
2174 if (NumSrcSignBits > (NumSrcBits - TyBits))
2175 return NumSrcSignBits - (NumSrcBits - TyBits);
2176 break;
2177 }
2178 case TargetOpcode::G_INTRINSIC:
2179 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2180 case TargetOpcode::G_INTRINSIC_CONVERGENT:
2181 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
2182 default: {
2183 unsigned NumBits =
2184 TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth);
2185 if (NumBits > 1)
2186 FirstAnswer = std::max(FirstAnswer, NumBits);
2187 break;
2188 }
2189 }
2190
2191
2192
2196 Mask = Known.Zero;
2197 } else if (Known.isNegative()) {
2198 Mask = Known.One;
2199 } else {
2200
2201 return FirstAnswer;
2202 }
2203
2204
2205
2206 Mask <<= Mask.getBitWidth() - TyBits;
2207 return std::max(FirstAnswer, Mask.countl_one());
2208}
2209
2211 LLT Ty = MRI.getType(R);
2212 APInt DemandedElts =
2215}
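// Editor's note (illustrative, not part of the original file): computeNumSignBits is
// the GlobalISel counterpart of SelectionDAG's ComputeNumSignBits. A hedged usage
// sketch, with `VT` and `Reg` as placeholder names:
//   unsigned SignBits = VT.computeNumSignBits(Reg);
//   unsigned BitWidth = MRI.getType(Reg).getScalarSizeInBits();
//   // Operands whose top half is all copies of the sign bit fit in the low half
//   // when sign-extended, which often enables narrowing combines.
//   bool FitsInLowHalf = SignBits > BitWidth / 2;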
2216
2219
2221 unsigned Opcode = MI.getOpcode();
2222
2223 LLT Ty = MRI.getType(R);
2224 unsigned BitWidth = Ty.getScalarSizeInBits();
2225
2226 if (Opcode == TargetOpcode::G_CONSTANT) {
2227 const APInt &ShAmt = MI.getOperand(1).getCImm()->getValue();
2229 return std::nullopt;
2231 }
2232
2233 if (Opcode == TargetOpcode::G_BUILD_VECTOR) {
2234 const APInt *MinAmt = nullptr, *MaxAmt = nullptr;
2235 for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
2236 if (!DemandedElts[I])
2237 continue;
2238 MachineInstr *Op = MRI.getVRegDef(MI.getOperand(I + 1).getReg());
2239 if (Op->getOpcode() != TargetOpcode::G_CONSTANT) {
2240 MinAmt = MaxAmt = nullptr;
2241 break;
2242 }
2243
2244 const APInt &ShAmt = Op->getOperand(1).getCImm()->getValue();
2246 return std::nullopt;
2247 if (!MinAmt || MinAmt->ugt(ShAmt))
2248 MinAmt = &ShAmt;
2249 if (!MaxAmt || MaxAmt->ult(ShAmt))
2250 MaxAmt = &ShAmt;
2251 }
2252 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
2253 "Failed to find matching min/max shift amounts");
2254 if (MinAmt && MaxAmt)
2256 }
2257
2258
2259
2263
2264 return std::nullopt;
2265}
2266
2269 if (std::optional<ConstantRange> AmtRange =
2270 getValidShiftAmountRange(R, DemandedElts, Depth))
2271 return AmtRange->getUnsignedMin().getZExtValue();
2272 return std::nullopt;
2273}
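// Editor's note (illustrative, not part of the original file):
// getValidMinimumShiftAmount is a convenience wrapper around
// getValidShiftAmountRange; callers that need both bounds can query the range
// directly, where `VT` and `ShAmtReg` are placeholder names:
//   if (std::optional<ConstantRange> Range =
//           VT.getValidShiftAmountRange(ShAmtReg, DemandedElts, /*Depth=*/0)) {
//     uint64_t MinAmt = Range->getUnsignedMin().getZExtValue();
//     uint64_t MaxAmt = Range->getUnsignedMax().getZExtValue();
//   }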
2274
2280
2285
2287 if (!Info) {
2288 unsigned MaxDepth =
2290 Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
2291 }
2292 return *Info;
2293}
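// Editor's note (illustrative, not part of the original file): the legacy wrapper pass
// caches one GISelValueTracking per MachineFunction and hands it out via get(). A
// hedged usage sketch from a legacy MachineFunction pass (the wrapper's exact class
// name is assumed here):
//   GISelValueTracking &VT =
//       getAnalysis<GISelValueTrackingAnalysisLegacy>().get(MF);
//   KnownBits Known = VT.getKnownBits(SomeReg);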
2294
2295AnalysisKey GISelValueTrackingAnalysis::Key;
2296
2302
2308 OS << "name: ";
2310 OS << '\n';
2311
2315 if (!MO.isReg() || MO.getReg().isPhysical())
2316 continue;
2317 Register Reg = MO.getReg();
2318 if (!MRI.getType(Reg).isValid())
2319 continue;
2320 KnownBits Known = VTA.getKnownBits(Reg);
2321 unsigned SignedBits = VTA.computeNumSignBits(Reg);
2322 OS << " " << MO << " KnownBits:" << Known << " SignBits:" << SignedBits
2323 << '\n';
2324 };
2325 }
2326 }
2328}