LLVM: lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp Source File
23using namespace llvm;
24using namespace llvm::PatternMatch;
25
26#define DEBUG_TYPE "instcombine"
27
28static cl::opt<bool> VerifyKnownBits(
29 "instcombine-verify-known-bits",
30 cl::desc("Verify that computeKnownBits() and "
31 "SimplifyDemandedBits() are consistent"),
32 cl::Hidden, cl::init(false));
33
34static cl::opt<unsigned> SimplifyDemandedVectorEltsDepthLimit(
35 "instcombine-simplify-vector-elts-depth",
36 cl::desc(
37 "Depth limit when simplifying vector instructions and their operands"),
38 cl::Hidden, cl::init(10));
39
40
41
42
44 const APInt &Demanded) {
45 assert(I && "No instruction?");
46 assert(OpNo < I->getNumOperands() && "Operand index too large");
47
48
49 Value *Op = I->getOperand(OpNo);
50 const APInt *C;
51 if (!match(Op, m_APInt(C)))
52 return false;
53
54
55 if (C->isSubsetOf(Demanded))
56 return false;
57
58
59 I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));
60
61 return true;
62}
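// Illustrative example (not from the original source): given
// `%a = and i32 %x, 0xFFF0` with Demanded = 0xFF, the constant 0xFFF0 is not
// a subset of the demanded bits, so ShrinkDemandedConstant rewrites the
// operand to 0xFFF0 & 0xFF = 0xF0 and returns true; the demanded bits of the
// result are unchanged.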
63
64
65
66
67
68
69
71 const APInt &DemandedMask,
74 assert(I->getOpcode() == Instruction::LShr &&
75 "Only lshr instruction supported");
76
79 if ((I->getOperand(0),
83 return nullptr;
84
86 return nullptr;
87
89 if (DemandedBitWidth > ShlAmt)
90 return nullptr;
91
92
93 if (Upper->getType()->getScalarSizeInBits() < ShlAmt + DemandedBitWidth)
94 return nullptr;
95
98 return nullptr;
99
100 Value *ShrAmt = I->getOperand(1);
102
103
104
105 if (~KnownShrBits.Zero != ShlAmt)
106 return nullptr;
107
112 ShrAmt->getName() + ".z");
113
114
119}
120
121
122
124 if (unsigned BitWidth = Ty->getScalarSizeInBits())
125 return BitWidth;
126
127 return DL.getPointerTypeSizeInBits(Ty);
128}
129
130
131
136 SQ.getWithInstruction(&Inst));
137 if (!V) return false;
138 if (V == &Inst) return true;
139 replaceInstUsesWith(Inst, V);
140 return true;
141}
142
143
144
149
150
151
152
154 const APInt &DemandedMask,
157 unsigned Depth) {
158 Use &U = I->getOperandUse(OpNo);
159 Value *V = U.get();
162 return false;
163 }
164
166 if (DemandedMask.isZero()) {
167
169 return true;
170 }
171
173 if (!VInst) {
175 return false;
176 }
177
179 return false;
180
183
185 } else {
186
187
188 NewVal =
190 }
191 if (!NewVal) return false;
194
196 return true;
197}
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
223 const APInt &DemandedMask,
226 unsigned Depth) {
227 assert(I != nullptr && "Null pointer of Value???");
234 "Value *V, DemandedMask and Known must have same BitWidth");
235
237
238
239
240 auto disableWrapFlagsBasedOnUnusedHighBits = [](Instruction *I,
241 unsigned NLZ) {
242 if (NLZ > 0) {
243
244
245
246 I->setHasNoSignedWrap(false);
247 I->setHasNoUnsignedWrap(false);
248 }
249 return I;
250 };
251
252
253
254 auto simplifyOperandsBasedOnUnusedHighBits = [&](APInt &DemandedFromOps) {
255 unsigned NLZ = DemandedMask.countl_zero();
256
257
263 disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
264 return true;
265 }
266 return false;
267 };
268
269 switch (I->getOpcode()) {
270 default:
272 break;
273 case Instruction::And: {
274
278 return I;
279
282
283
284
287
288
289
290 if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
291 return I->getOperand(0);
292 if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
293 return I->getOperand(1);
294
295
297 return I;
298
299 break;
300 }
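// Illustrative example (not from the original source): in
// `%r = and i8 %x, 0xF0` with DemandedMask = 0xF0, every demanded bit of the
// RHS is known one, so DemandedMask is a subset of
// (LHSKnown.Zero | RHSKnown.One) and operand 0 (%x) is returned: the mask is
// a no-op for the bits the caller actually uses.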
301 case Instruction::Or: {
302
306
307 I->dropPoisonGeneratingFlags();
308 return I;
309 }
310
313
314
315
318
319
320
321 if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
322 return I->getOperand(0);
323 if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
324 return I->getOperand(1);
325
326
328 return I;
329
330
333 RHSCache(I->getOperand(1), RHSKnown);
336 return I;
337 }
338 }
339
340 break;
341 }
342 case Instruction::Xor: {
345 return I;
346 Value *LHS, *RHS;
347 if (DemandedMask == 1 &&
350
353 auto *Xor = Builder.CreateXor(LHS, RHS);
354 return Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, Xor);
355 }
356
359
360
361
364
365
366
368 return I->getOperand(0);
369 if (DemandedMask.isSubsetOf(LHSKnown.Zero))
370 return I->getOperand(1);
371
372
373
374
375 if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
377 BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1));
382 }
383
384
385
386
387
391 ~RHSKnown.One & DemandedMask);
392 Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
394 }
395
396
397
398
400 if (match(I->getOperand(1), m_APInt(C)) && !C->isAllOnes()) {
401 if ((*C | ~DemandedMask).isAllOnes()) {
402
404 return I;
405 }
406
408 return I;
409 }
410
411
412
413
414
417 if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
420 (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
421 APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);
422
423 Constant *AndC = ConstantInt::get(VTy, NewMask & AndRHS->getValue());
424 Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
426
427 Constant *XorC = ConstantInt::get(VTy, NewMask & XorRHS->getValue());
428 Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
430 }
431 }
432 break;
433 }
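// Illustrative example (not from the original source): in
// `%r = xor i8 %x, 1` with DemandedMask = 0xFE, every demanded bit of the
// constant is known zero, so DemandedMask is a subset of RHSKnown.Zero and
// operand 0 (%x) is returned; flipping bit 0 cannot affect the demanded
// bits. Conversely, if the constant covers every demanded bit (e.g. xor with
// 0xFE under the same mask), the code above widens it to all-ones to form a
// canonical 'not'.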
434 case Instruction::Select: {
437 return I;
438
439
440
441
442
443
444 auto CanonicalizeSelectConstant = [](Instruction *I, unsigned OpNo,
445 const APInt &DemandedMask) {
446 const APInt *SelC;
448 return false;
449
450
451
452
453
455 const APInt *CmpC;
459
460
461 if (*CmpC == *SelC)
462 return false;
463
464
465 if ((*CmpC & DemandedMask) == (*SelC & DemandedMask)) {
466 I->setOperand(OpNo, ConstantInt::get(I->getType(), *CmpC));
467 return true;
468 }
470 };
471 if (CanonicalizeSelectConstant(I, 1, DemandedMask) ||
472 CanonicalizeSelectConstant(I, 2, DemandedMask))
473 return I;
474
475
477 false, Q, Depth);
479 true, Q, Depth);
481 break;
482 }
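// Illustrative example (not from the original source): for
//   %c = icmp eq i8 %x, 4
//   %s = select i1 %c, i8 5, i8 %y
// with DemandedMask = 0xFE, the compare constant 4 and the select constant 5
// agree on every demanded bit, so CanonicalizeSelectConstant rewrites the
// true arm to 4 (assuming the partially elided guard matches this
// icmp-against-constant form), which exposes min/max-style folds later on.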
483 case Instruction::Trunc: {
484
485
489
490
493
497 return Builder.CreateLShr(Trunc, C->getZExtValue());
498 }
499 }
500 }
501 [[fallthrough]];
502 case Instruction::ZExt: {
503 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
504
505 APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
506 KnownBits InputKnown(SrcBitWidth);
509
510
511 I->dropPoisonGeneratingFlags();
512 return I;
513 }
514 assert(InputKnown.getBitWidth() == SrcBitWidth && "Src width changed?");
515 if (I->getOpcode() == Instruction::ZExt && I->hasNonNeg() &&
519
520 break;
521 }
522 case Instruction::SExt: {
523
524 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
525
526 APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);
527
528
529
531 InputDemandedBits.setBit(SrcBitWidth-1);
532
533 KnownBits InputKnown(SrcBitWidth);
535 return I;
536
537
538
541
545 }
546
547
548
550 break;
551 }
552 case Instruction::Add: {
553 if ((DemandedMask & 1) == 0) {
554
555
559 X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType()) {
560
561
562
563
564
565
569 return Builder.CreateSExt(AndNot, VTy);
570 }
571
572
574 X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
575 (I->getOperand(0)->hasOneUse() || I->getOperand(1)->hasOneUse())) {
576
577
578
579
580
581
582
586 return Builder.CreateSExt(Or, VTy);
587 }
588 }
589
590
591
592 unsigned NLZ = DemandedMask.countl_zero();
596 return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
597
598
599
600
601 unsigned NTZ = (~DemandedMask & RHSKnown.Zero).countr_one();
602 APInt DemandedFromLHS = DemandedFromOps;
606 return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
607
608
609
611 return I->getOperand(0);
612 if (DemandedFromOps.isSubsetOf(LHSKnown.Zero))
613 return I->getOperand(1);
614
615
616 {
622 return Builder.CreateXor(I->getOperand(0), ConstantInt::get(VTy, *C));
623 }
624 }
625
626
629 Known = KnownBits::add(LHSKnown, RHSKnown, NSW, NUW);
630 break;
631 }
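// Illustrative example (not from the original source): in
// `%r = add i8 %x, 16` with DemandedMask = 0x0F, none of the demanded bits
// can receive a carry from the constant (its bits at or below bit 3 are all
// known zero), so DemandedFromOps is a subset of RHSKnown.Zero and operand 0
// (%x) is returned. Whenever high bits are not demanded, the nsw/nuw flags
// are also dropped, since the simplified operands may now wrap.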
632 case Instruction::Sub: {
633
634
635 unsigned NLZ = DemandedMask.countl_zero();
639 return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
640
641
642
643
644 unsigned NTZ = (~DemandedMask & RHSKnown.Zero).countr_one();
645 APInt DemandedFromLHS = DemandedFromOps;
649 return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
650
651
652
654 return I->getOperand(0);
655
656
657 if (DemandedFromOps.isOne() && DemandedFromOps.isSubsetOf(LHSKnown.Zero))
658 return I->getOperand(1);
659
660
661 const APInt *LHSC;
666 return Builder.CreateNot(I->getOperand(1));
667 }
668
669
672 Known = KnownBits::sub(LHSKnown, RHSKnown, NSW, NUW);
673 break;
674 }
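// Illustrative example (not from the original source): in
// `%r = sub i8 %x, 32` with DemandedMask = 0x1F, the subtrahend is known
// zero in every bit at or below the highest demanded bit, so borrows cannot
// reach the demanded bits and operand 0 (%x) is returned. The isOne() check
// covers cases like `sub i8 0, %y` when only bit 0 is demanded: the low bit
// of a negation equals the low bit of %y itself.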
675 case Instruction::Mul: {
676 APInt DemandedFromOps;
677 if (simplifyOperandsBasedOnUnusedHighBits(DemandedFromOps))
678 return I;
679
681
682
683
684 unsigned CTZ = DemandedMask.countr_zero();
686 if (match(I->getOperand(1), m_APInt(C)) && C->countr_zero() == CTZ) {
687 Constant *ShiftC = ConstantInt::get(VTy, CTZ);
688 Instruction *Shl = BinaryOperator::CreateShl(I->getOperand(0), ShiftC);
690 }
691 }
692
693
694
695 if (I->getOperand(0) == I->getOperand(1) && DemandedMask.ult(4)) {
696 Constant *One = ConstantInt::get(VTy, 1);
697 Instruction *And1 = BinaryOperator::CreateAnd(I->getOperand(0), One);
699 }
700
702 break;
703 }
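// Illustrative example (not from the original source): if only bit 3 of
// `%r = mul i8 %x, 24` is demanded (DemandedMask = 0x08), the constant has
// the same number of trailing zeros as the mask, so the multiply becomes
// `shl i8 %x, 3`: bit 3 of x*24 is bit 0 of x*3, which is just bit 0 of x,
// exactly what the shift produces. The squaring case in the same block
// answers a demand for the low two bits of `mul %x, %x` with `and %x, 1`,
// since bit 1 of a square is always zero.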
704 case Instruction::Shl: {
707 const APInt *ShrAmt;
711 DemandedMask, Known))
712 return R;
713
714
715 if (I->hasOneUse()) {
717 if (Inst && Inst->getOpcode() == BinaryOperator::Or) {
719 auto [IID, FShiftArgs] = *Opt;
720 if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
721 FShiftArgs[0] == FShiftArgs[1]) {
723 break;
724 }
725 }
726 }
727 }
728
729
730
732 if (DemandedMask.countr_zero() >= ShiftAmt) {
733 if (I->hasNoSignedWrap()) {
735 unsigned SignBits =
737 if (SignBits > ShiftAmt && SignBits - ShiftAmt >= NumHiDemandedBits)
738 return I->getOperand(0);
739 }
740
741
742
743
744
748 Constant *LeftShiftAmtC = ConstantInt::get(VTy, ShiftAmt);
750 LeftShiftAmtC, DL);
752 LeftShiftAmtC, DL) == C) {
753 Instruction *Lshr = BinaryOperator::CreateLShr(NewC, X);
755 }
756 }
757 }
758
759 APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
760
761
767
769 return I;
770
775 } else {
776
777
778
779 if (unsigned CTLZ = DemandedMask.countl_zero()) {
782
783 I->dropPoisonGeneratingFlags();
784 return I;
785 }
786 }
788 }
789 break;
790 }
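// Illustrative example (not from the original source): for
// `%r = shl i8 %x, 4` with DemandedMask = 0xF0, the demanded result bits all
// come from the low nibble of %x, so the recursive call uses
// DemandedMaskIn = DemandedMask.lshr(4) = 0x0F. If the shift is nsw and %x
// is known to be entirely sign bits (say, `%x = sext i1 %b to i8`), shifting
// cannot change any demanded bit and operand 0 is returned directly.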
791 case Instruction::LShr: {
795
796
797 if (I->hasOneUse()) {
799 if (Inst && Inst->getOpcode() == BinaryOperator::Or) {
801 auto [IID, FShiftArgs] = *Opt;
802 if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
803 FShiftArgs[0] == FShiftArgs[1]) {
805 break;
806 }
807 }
808 }
809 }
810
811
812
813 if (DemandedMask.countl_zero() >= ShiftAmt) {
814
815
817 unsigned SignBits =
819 if (SignBits >= NumHiDemandedBits)
820 return I->getOperand(0);
821
822
823
824
825
829 Constant *RightShiftAmtC = ConstantInt::get(VTy, ShiftAmt);
831 RightShiftAmtC, DL);
833 RightShiftAmtC, DL) == C) {
834 Instruction *Shl = BinaryOperator::CreateShl(NewC, X);
836 }
837 }
838
839 const APInt *Factor;
840 if (match(I->getOperand(0),
844 X, ConstantInt::get(X->getType(), Factor->lshr(ShiftAmt)));
846 }
847 }
848
849
850 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
852
853 I->dropPoisonGeneratingFlags();
854 return I;
855 }
856 Known >>= ShiftAmt;
857 if (ShiftAmt)
858 Known.Zero.setHighBits(ShiftAmt);
859 break;
860 }
863 return V;
864
866 break;
867 }
868 case Instruction::AShr: {
870
871
872
874 if (SignBits >= NumHiDemandedBits)
875 return I->getOperand(0);
876
877
878
879
880
881 if (DemandedMask.isOne()) {
882
883 Instruction *NewVal = BinaryOperator::CreateLShr(
884 I->getOperand(0), I->getOperand(1), I->getName());
886 }
887
891
892
893 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
894
895
896 bool ShiftedInBitsDemanded = DemandedMask.countl_zero() < ShiftAmt;
897 if (ShiftedInBitsDemanded)
900
901 I->dropPoisonGeneratingFlags();
902 return I;
903 }
904
905
906
907 if (Known.Zero[BitWidth - 1] || !ShiftedInBitsDemanded) {
908 BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
909 I->getOperand(1));
913 }
914
917 ShiftAmt != 0, I->isExact());
918 } else {
920 }
921 break;
922 }
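// Illustrative example (not from the original source): if only bit 0 of
// `%r = ashr i8 %x, 7` is demanded, an arithmetic and a logical shift agree
// on that bit (both place the original sign bit there), so the
// DemandedMask.isOne() path rewrites it as `lshr i8 %x, 7`. Likewise, when
// the sign bit is known zero or the shifted-in sign copies are not demanded,
// the ashr is converted to an lshr so later folds need not reason about
// sign extension.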
923 case Instruction::UDiv: {
924
927
928 unsigned RHSTrailingZeros = SA->countr_zero();
929 APInt DemandedMaskIn =
932
933
934 I->dropPoisonGeneratingFlags();
935 return I;
936 }
937
940 } else {
942 }
943 break;
944 }
945 case Instruction::SRem: {
946 const APInt *Rem;
948 if (DemandedMask.ult(*Rem))
949 return I->getOperand(0);
950
951 APInt LowBits = *Rem - 1;
954 return I;
956 break;
957 }
958
960 break;
961 }
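// Illustrative example (not from the original source): for a power-of-two
// divisor, e.g. `%r = srem i8 %x, 8` with DemandedMask = 0x03, the remainder
// differs from %x only by a multiple of 8, so the low three bits are
// untouched; DemandedMask.ult(8) holds and operand 0 (%x) is returned.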
962 case Instruction::Call: {
963 bool KnownBitsComputed = false;
965 switch (II->getIntrinsicID()) {
966 case Intrinsic::abs: {
967 if (DemandedMask == 1)
968 return II->getArgOperand(0);
969 break;
970 }
971 case Intrinsic::ctpop: {
972
973
974
979 II->getModule(), Intrinsic::ctpop, VTy);
981 }
982 break;
983 }
984 case Intrinsic::bswap: {
985
986
987 unsigned NLZ = DemandedMask.countl_zero();
988 unsigned NTZ = DemandedMask.countr_zero();
989
990
991
992
995
996 if (BitWidth - NLZ - NTZ == 8) {
997
998
1000 if (NLZ > NTZ)
1001 NewVal = BinaryOperator::CreateLShr(
1002 II->getArgOperand(0), ConstantInt::get(VTy, NLZ - NTZ));
1003 else
1004 NewVal = BinaryOperator::CreateShl(
1005 II->getArgOperand(0), ConstantInt::get(VTy, NTZ - NLZ));
1008 }
1009 break;
1010 }
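// Illustrative example (not from the original source): for
//   %r = call i32 @llvm.bswap.i32(i32 %x)
// with DemandedMask = 0x0000FF00, only one byte of the swapped value is used
// (NLZ = 16, NTZ = 8, so BitWidth - NLZ - NTZ == 8). That byte is byte 2 of
// %x, so the call is replaced by `lshr i32 %x, 8` (a shift right by
// NLZ - NTZ), avoiding the full byte swap.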
1011 case Intrinsic::ptrmask: {
1012 unsigned MaskWidth = I->getOperand(1)->getType()->getScalarSizeInBits();
1013 RHSKnown = KnownBits(MaskWidth);
1014
1017 I, 1, (DemandedMask & ~LHSKnown.Zero).zextOrTrunc(MaskWidth),
1018 RHSKnown, Q, Depth + 1))
1019 return I;
1020
1021
1023
1024 Known = LHSKnown & RHSKnown;
1025 KnownBitsComputed = true;
1026
1027
1028
1029
1030
1035
1036
1037
1038
1039
1040 if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
1041 return I->getOperand(0);
1042
1043
1045 I, 1, (DemandedMask & ~LHSKnown.Zero).zextOrTrunc(MaskWidth)))
1046 return I;
1047
1048
1049
1050
1051
1052 Value *InnerPtr;
1058
1060 if (!LHSKnown.isZero()) {
1061 const unsigned trailingZeros = LHSKnown.countMinTrailingZeros();
1062 uint64_t PointerAlignBits = (uint64_t(1) << trailingZeros) - 1;
1063
1064 uint64_t HighBitsGEPIndex = GEPIndex & ~PointerAlignBits;
1065 uint64_t MaskedLowBitsGEPIndex =
1066 GEPIndex & PointerAlignBits & PtrMaskImmediate;
1067
1068 uint64_t MaskedGEPIndex = HighBitsGEPIndex | MaskedLowBitsGEPIndex;
1069
1070 if (MaskedGEPIndex != GEPIndex) {
1073 Type *GEPIndexType =
1074 DL.getIndexType(GEP->getPointerOperand()->getType());
1076 GEP->getSourceElementType(), InnerPtr,
1077 ConstantInt::get(GEPIndexType, MaskedGEPIndex),
1078 GEP->getName(), GEP->isInBounds());
1079
1081 return I;
1082 }
1083 }
1084 }
1085
1086 break;
1087 }
1088
1089 case Intrinsic::fshr:
1090 case Intrinsic::fshl: {
1091 const APInt *SA;
1093 break;
1094
1095
1096
1098 if (II->getIntrinsicID() == Intrinsic::fshr)
1099 ShiftAmt = BitWidth - ShiftAmt;
1100
1101 APInt DemandedMaskLHS(DemandedMask.lshr(ShiftAmt));
1102 APInt DemandedMaskRHS(DemandedMask.shl(BitWidth - ShiftAmt));
1103 if (I->getOperand(0) != I->getOperand(1)) {
1108
1109 I->dropPoisonGeneratingReturnAttributes();
1110 return I;
1111 }
1112 } else {
1113
1114
1116 if (DemandedMaskLHS.isSubsetOf(LHSKnown.Zero | LHSKnown.One) &&
1119 return I;
1120 }
1121
1126 return I;
1127 }
1128 }
1129
1130 LHSKnown <<= ShiftAmt;
1131 RHSKnown >>= BitWidth - ShiftAmt;
1132 Known = LHSKnown.unionWith(RHSKnown);
1133 KnownBitsComputed = true;
1134 break;
1135 }
1136 case Intrinsic::umax: {
1137
1138
1139
1141 unsigned CTZ = DemandedMask.countr_zero();
1143 CTZ >= C->getActiveBits())
1144 return II->getArgOperand(0);
1145 break;
1146 }
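// Illustrative example (not from the original source): for
//   %r = call i8 @llvm.umax.i8(i8 %x, i8 8)
// with DemandedMask = 0xF0, the constant's active bits all sit below the
// lowest demanded bit (CTZ = 4 >= getActiveBits(8) = 4), so taking the max
// with 8 can only change bits the caller ignores and %x is returned.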
1147 case Intrinsic::umin: {
1148
1149
1150
1151
1153 unsigned CTZ = DemandedMask.countr_zero();
1155 CTZ >= C->getBitWidth() - C->countl_one())
1156 return II->getArgOperand(0);
1157 break;
1158 }
1159 default: {
1160
1162 *II, DemandedMask, Known, KnownBitsComputed);
1163 if (V)
1164 return *V;
1165 break;
1166 }
1167 }
1168 }
1169
1170 if (!KnownBitsComputed)
1172 break;
1173 }
1174 }
1175
1176 if (I->getType()->isPointerTy()) {
1177 Align Alignment = I->getPointerAlignment(DL);
1179 }
1180
1181
1182
1183
1184
1185 if (!I->getType()->isPointerTy() &&
1188
1191 if (Known != ReferenceKnown) {
1192 errs() << "Mismatched known bits for " << *I << " in "
1193 << I->getFunction()->getName() << "\n";
1194 errs() << "computeKnownBits(): " << ReferenceKnown << "\n";
1195 errs() << "SimplifyDemandedBits(): " << Known << "\n";
1196 std::abort();
1197 }
1198 }
1199
1200 return nullptr;
1201}
1202
1203
1204
1205
1210 Type *ITy = I->getType();
1211
1214
1215
1216
1217
1218
1219 switch (I->getOpcode()) {
1220 case Instruction::And: {
1226
1227
1228
1231
1232
1233
1235 return I->getOperand(0);
1237 return I->getOperand(1);
1238
1239 break;
1240 }
1241 case Instruction::Or: {
1247
1248
1249
1252
1253
1254
1255
1256
1258 return I->getOperand(0);
1260 return I->getOperand(1);
1261
1262 break;
1263 }
1264 case Instruction::Xor: {
1270
1271
1272
1275
1276
1277
1278
1280 return I->getOperand(0);
1282 return I->getOperand(1);
1283
1284 break;
1285 }
1286 case Instruction::Add: {
1287 unsigned NLZ = DemandedMask.countl_zero();
1289
1290
1291
1294 return I->getOperand(0);
1295
1298 return I->getOperand(1);
1299
1302 Known = KnownBits::add(LHSKnown, RHSKnown, NSW, NUW);
1304 break;
1305 }
1306 case Instruction::Sub: {
1307 unsigned NLZ = DemandedMask.countl_zero();
1309
1310
1311
1314 return I->getOperand(0);
1315
1319 Known = KnownBits::sub(LHSKnown, RHSKnown, NSW, NUW);
1321 break;
1322 }
1323 case Instruction::AShr: {
1324
1326
1327
1328
1331
1332
1333
1334
1335
1336 const APInt *ShiftRC;
1337 const APInt *ShiftLC;
1342 ShiftLC == ShiftRC && ShiftLC->ult(BitWidth) &&
1345 return X;
1346 }
1347
1348 break;
1349 }
1350 default:
1351
1353
1354
1355
1358
1359 break;
1360 }
1361
1362 return nullptr;
1363}
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1385 if (!ShlOp1 || !ShrOp1)
1386 return nullptr;
1387
1390 unsigned BitWidth = Ty->getScalarSizeInBits();
1392 return nullptr;
1393
1396
1399 Known.Zero &= DemandedMask;
1400
1403
1404 bool isLshr = (Shr->getOpcode() == Instruction::LShr);
1405 BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
1406 (BitMask1.ashr(ShrAmt) << ShlAmt);
1407
1408 if (ShrAmt <= ShlAmt) {
1409 BitMask2 <<= (ShlAmt - ShrAmt);
1410 } else {
1411 BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt):
1412 BitMask2.ashr(ShrAmt - ShlAmt);
1413 }
1414
1415
1416 if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
1417 if (ShrAmt == ShlAmt)
1418 return VarX;
1419
1421 return nullptr;
1422
1424 if (ShrAmt < ShlAmt) {
1425 Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
1426 New = BinaryOperator::CreateShl(VarX, Amt);
1430 } else {
1431 Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
1432 New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
1433 BinaryOperator::CreateAShr(VarX, Amt);
1435 New->setIsExact(true);
1436 }
1437
1439 }
1440
1441 return nullptr;
1442}
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1458 APInt DemandedElts,
1459 APInt &PoisonElts,
1461 bool AllowMultipleUsers) {
1462
1463
1465 return nullptr;
1466
1469 assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
1470
1472
1473 PoisonElts = EltMask;
1474 return nullptr;
1475 }
1476
1477 if (DemandedElts.isZero()) {
1478 PoisonElts = EltMask;
1479 return PoisonValue::get(V->getType());
1480 }
1481
1482 PoisonElts = 0;
1483
1485
1486
1488 return nullptr;
1489
1493 for (unsigned i = 0; i != VWidth; ++i) {
1494 if (!DemandedElts[i]) {
1496 PoisonElts.setBit(i);
1497 continue;
1498 }
1499
1500 Constant *Elt = C->getAggregateElement(i);
1501 if (!Elt) return nullptr;
1502
1505 PoisonElts.setBit(i);
1506 }
1507
1508
1510 return NewCV != C ? NewCV : nullptr;
1511 }
1512
1513
1515 return nullptr;
1516
1517 if (!AllowMultipleUsers) {
1518
1519
1520
1521 if (!V->hasOneUse()) {
1522
1523
1524
1526
1527 return nullptr;
1528
1529
1530 DemandedElts = EltMask;
1531 }
1532 }
1533
1534 Instruction *I = dyn_cast<Instruction>(V);
1535 if (!I) return nullptr;
1536
1537 bool MadeChange = false;
1538 auto simplifyAndSetOp = [&](Instruction *Inst, unsigned OpNum,
1544 MadeChange = true;
1545 }
1546 };
1547
1548 APInt PoisonElts2(VWidth, 0);
1549 APInt PoisonElts3(VWidth, 0);
1550 switch (I->getOpcode()) {
1551 default: break;
1552
1553 case Instruction::GetElementPtr: {
1554
1555
1559 if (I.isStruct())
1560 return true;
1561 return false;
1562 };
1564 break;
1565
1566
1567
1568
1569
1570
1571 for (unsigned i = 0; i < I->getNumOperands(); i++) {
1574
1575 PoisonElts = EltMask;
1576 return nullptr;
1577 }
1578 if (I->getOperand(i)->getType()->isVectorTy()) {
1579 APInt PoisonEltsOp(VWidth, 0);
1580 simplifyAndSetOp(I, i, DemandedElts, PoisonEltsOp);
1581
1582
1583
1584 if (i == 0)
1585 PoisonElts |= PoisonEltsOp;
1586 }
1587 }
1588
1589 break;
1590 }
1591 case Instruction::InsertElement: {
1592
1593
1595 if (!Idx) {
1596
1597
1598 simplifyAndSetOp(I, 0, DemandedElts, PoisonElts2);
1599 break;
1600 }
1601
1602
1603
1604 unsigned IdxNo = Idx->getZExtValue();
1605 APInt PreInsertDemandedElts = DemandedElts;
1606 if (IdxNo < VWidth)
1607 PreInsertDemandedElts.clearBit(IdxNo);
1608
1609
1610
1611
1612
1613
1615 if (PreInsertDemandedElts == 0 &&
1618 Vec->getType() == I->getType()) {
1619 return Vec;
1620 }
1621
1622 simplifyAndSetOp(I, 0, PreInsertDemandedElts, PoisonElts);
1623
1624
1625
1626 if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1628 return I->getOperand(0);
1629 }
1630
1631
1633 break;
1634 }
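// Illustrative example (not from the original source): for
//   %v = insertelement <4 x i32> %src, i32 %s, i32 2
// where DemandedElts = 0b0011 (lane 2 is never read), the inserted lane is
// irrelevant and the instruction is replaced by %src. If lane 2 were
// demanded, the code instead clears bit 2 in PreInsertDemandedElts before
// recursing into %src, because that source lane is overwritten anyway.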
1635 case Instruction::ShuffleVector: {
1637 assert(Shuffle->getOperand(0)->getType() ==
1638 Shuffle->getOperand(1)->getType() &&
1639 "Expected shuffle operands to have same type");
1641 ->getNumElements();
1642
1643
1644 if (all_of(Shuffle->getShuffleMask(), [](int Elt) { return Elt == 0; }) &&
1648 MadeChange = true;
1649 }
1650 APInt LeftDemanded(OpWidth, 1);
1651 APInt LHSPoisonElts(OpWidth, 0);
1652 simplifyAndSetOp(I, 0, LeftDemanded, LHSPoisonElts);
1653 if (LHSPoisonElts[0])
1654 PoisonElts = EltMask;
1655 else
1657 break;
1658 }
1659
1660 APInt LeftDemanded(OpWidth, 0), RightDemanded(OpWidth, 0);
1661 for (unsigned i = 0; i < VWidth; i++) {
1662 if (DemandedElts[i]) {
1663 unsigned MaskVal = Shuffle->getMaskValue(i);
1664 if (MaskVal != -1u) {
1665 assert(MaskVal < OpWidth * 2 &&
1666 "shufflevector mask index out of range!");
1667 if (MaskVal < OpWidth)
1668 LeftDemanded.setBit(MaskVal);
1669 else
1670 RightDemanded.setBit(MaskVal - OpWidth);
1671 }
1672 }
1673 }
1674
1675 APInt LHSPoisonElts(OpWidth, 0);
1676 simplifyAndSetOp(I, 0, LeftDemanded, LHSPoisonElts);
1677
1678 APInt RHSPoisonElts(OpWidth, 0);
1679 simplifyAndSetOp(I, 1, RightDemanded, RHSPoisonElts);
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692 if (VWidth == OpWidth) {
1693 bool IsIdentityShuffle = true;
1694 for (unsigned i = 0; i < VWidth; i++) {
1695 unsigned MaskVal = Shuffle->getMaskValue(i);
1696 if (DemandedElts[i] && i != MaskVal) {
1697 IsIdentityShuffle = false;
1698 break;
1699 }
1700 }
1701 if (IsIdentityShuffle)
1702 return Shuffle->getOperand(0);
1703 }
1704
1705 bool NewPoisonElts = false;
1706 unsigned LHSIdx = -1u, LHSValIdx = -1u;
1707 unsigned RHSIdx = -1u, RHSValIdx = -1u;
1708 bool LHSUniform = true;
1709 bool RHSUniform = true;
1710 for (unsigned i = 0; i < VWidth; i++) {
1711 unsigned MaskVal = Shuffle->getMaskValue(i);
1712 if (MaskVal == -1u) {
1713 PoisonElts.setBit(i);
1714 } else if (!DemandedElts[i]) {
1715 NewPoisonElts = true;
1716 PoisonElts.setBit(i);
1717 } else if (MaskVal < OpWidth) {
1718 if (LHSPoisonElts[MaskVal]) {
1719 NewPoisonElts = true;
1720 PoisonElts.setBit(i);
1721 } else {
1722 LHSIdx = LHSIdx == -1u ? i : OpWidth;
1723 LHSValIdx = LHSValIdx == -1u ? MaskVal : OpWidth;
1724 LHSUniform = LHSUniform && (MaskVal == i);
1725 }
1726 } else {
1727 if (RHSPoisonElts[MaskVal - OpWidth]) {
1728 NewPoisonElts = true;
1729 PoisonElts.setBit(i);
1730 } else {
1731 RHSIdx = RHSIdx == -1u ? i : OpWidth;
1732 RHSValIdx = RHSValIdx == -1u ? MaskVal - OpWidth : OpWidth;
1733 RHSUniform = RHSUniform && (MaskVal - OpWidth == i);
1734 }
1735 }
1736 }
1737
1738
1739
1740
1741
1742 if (OpWidth ==
1746 unsigned Idx = -1u;
1747
1748
1749 if (LHSIdx < OpWidth && RHSUniform) {
1751 Op = Shuffle->getOperand(1);
1752 Value = CV->getOperand(LHSValIdx);
1753 Idx = LHSIdx;
1754 }
1755 }
1756 if (RHSIdx < OpWidth && LHSUniform) {
1758 Op = Shuffle->getOperand(0);
1759 Value = CV->getOperand(RHSValIdx);
1760 Idx = RHSIdx;
1761 }
1762 }
1763
1769 return New;
1770 }
1771 }
1772 if (NewPoisonElts) {
1773
1775 for (unsigned i = 0; i < VWidth; ++i) {
1776 if (PoisonElts[i])
1778 else
1779 Elts.push_back(Shuffle->getMaskValue(i));
1780 }
1781 Shuffle->setShuffleMask(Elts);
1782 MadeChange = true;
1783 }
1784 break;
1785 }
1786 case Instruction::Select: {
1787
1788
1791
1792
1793
1794
1795
1796 simplifyAndSetOp(I, 0, DemandedElts, PoisonElts);
1797 }
1798
1799
1800 APInt DemandedLHS(DemandedElts), DemandedRHS(DemandedElts);
1802 for (unsigned i = 0; i < VWidth; i++) {
1804
1805
1807 DemandedLHS.clearBit(i);
1810 }
1811 }
1812
1813 simplifyAndSetOp(I, 1, DemandedLHS, PoisonElts2);
1814 simplifyAndSetOp(I, 2, DemandedRHS, PoisonElts3);
1815
1816
1817
1818 PoisonElts = PoisonElts2 & PoisonElts3;
1819 break;
1820 }
1821 case Instruction::BitCast: {
1822
1824 if (!VTy) break;
1826 APInt InputDemandedElts(InVWidth, 0);
1827 PoisonElts2 = APInt(InVWidth, 0);
1828 unsigned Ratio;
1829
1830 if (VWidth == InVWidth) {
1831
1832
1833 Ratio = 1;
1834 InputDemandedElts = DemandedElts;
1835 } else if ((VWidth % InVWidth) == 0) {
1836
1837
1838
1839 Ratio = VWidth / InVWidth;
1840 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1841 if (DemandedElts[OutIdx])
1842 InputDemandedElts.setBit(OutIdx / Ratio);
1843 } else if ((InVWidth % VWidth) == 0) {
1844
1845
1846
1847 Ratio = InVWidth / VWidth;
1848 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1849 if (DemandedElts[InIdx / Ratio])
1850 InputDemandedElts.setBit(InIdx);
1851 } else {
1852
1853 break;
1854 }
1855
1856 simplifyAndSetOp(I, 0, InputDemandedElts, PoisonElts2);
1857
1858 if (VWidth == InVWidth) {
1859 PoisonElts = PoisonElts2;
1860 } else if ((VWidth % InVWidth) == 0) {
1861
1862
1863
1864 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1865 if (PoisonElts2[OutIdx / Ratio])
1866 PoisonElts.setBit(OutIdx);
1867 } else if ((InVWidth % VWidth) == 0) {
1868
1869
1870
1871 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1873 if (SubUndef.popcount() == Ratio)
1874 PoisonElts.setBit(OutIdx);
1875 }
1876 } else {
1878 }
1879 break;
1880 }
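// Illustrative example (not from the original source): bitcasting
// <2 x i64> to <4 x i32> gives Ratio = 2, so demanding only output element 3
// marks input element 3 / 2 = 1 as demanded. In the narrowing direction
// (<4 x i32> to <2 x i64>) each output element corresponds to Ratio input
// elements, and an output lane is marked poison only when every one of its
// Ratio input lanes was poison.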
1881 case Instruction::FPTrunc:
1882 case Instruction::FPExt:
1883 simplifyAndSetOp(I, 0, DemandedElts, PoisonElts);
1884 break;
1885
1886 case Instruction::Call: {
1887 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1888 if (!II) break;
1889 switch (II->getIntrinsicID()) {
1890 case Intrinsic::masked_gather:
1891 case Intrinsic::masked_load: {
1892
1893
1894
1896 DemandedPassThrough(DemandedElts);
1898 for (unsigned i = 0; i < VWidth; i++) {
1900 if (CElt->isNullValue())
1901 DemandedPtrs.clearBit(i);
1902 else if (CElt->isAllOnesValue())
1903 DemandedPassThrough.clearBit(i);
1904 }
1905 }
1906 }
1907
1908 if (II->getIntrinsicID() == Intrinsic::masked_gather)
1909 simplifyAndSetOp(II, 0, DemandedPtrs, PoisonElts2);
1910 simplifyAndSetOp(II, 2, DemandedPassThrough, PoisonElts3);
1911
1912
1913
1914 PoisonElts = PoisonElts2 & PoisonElts3;
1915 break;
1916 }
1917 default: {
1918
1920 *II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
1921 simplifyAndSetOp);
1922 if (V)
1923 return *V;
1924 break;
1925 }
1926 }
1927 break;
1928 }
1929 }
1930
1931
1932
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954 if (DemandedElts == 1 && ->hasOneUse() &&
->hasOneUse() &&
1956
1957 auto findShufBO = [&](bool MatchShufAsOp0) -> User * {
1958
1959
1960
1961
1962 Value *OtherOp = MatchShufAsOp0 ? Y : X;
1964 return nullptr;
1965
1967 Value *ShufOp = MatchShufAsOp0 ? X : Y;
1968
1969 for (User *U : OtherOp->users()) {
1974 : MatchShufAsOp0
1979 return U;
1980 }
1981 return nullptr;
1982 };
1983
1984 if (User *ShufBO = findShufBO( true))
1985 return ShufBO;
1986 if (User *ShufBO = findShufBO( false))
1987 return ShufBO;
1988 }
1989
1990 simplifyAndSetOp(I, 0, DemandedElts, PoisonElts);
1991 simplifyAndSetOp(I, 1, DemandedElts, PoisonElts2);
1992
1993
1994
1995 PoisonElts &= PoisonElts2;
1996 }
1997
1998
1999
2002
2003 return MadeChange ? I : nullptr;
2004}
2005
2006
2007
2011
2014
2015
2016 if (Ty->isAggregateType())
2017 return nullptr;
2018
2019 switch (Mask) {
2026 default:
2027 return nullptr;
2028 }
2029}
2030
2035 unsigned Depth) {
2037 Type *VTy = V->getType();
2038
2040
2041 if (DemandedMask == fcNone)
2043
2045 return nullptr;
2046
2047 Instruction *I = dyn_cast<Instruction>(V);
2048 if (!I) {
2049
2051 Value *FoldedToConst =
2053 return FoldedToConst == V ? nullptr : FoldedToConst;
2054 }
2055
2056 if (!I->hasOneUse())
2057 return nullptr;
2058
2060 if (FPOp->hasNoNaNs())
2061 DemandedMask &= ~fcNan;
2062 if (FPOp->hasNoInfs())
2063 DemandedMask &= ~fcInf;
2064 }
2065 switch (I->getOpcode()) {
2066 case Instruction::FNeg: {
2069 return I;
2070 Known.fneg();
2071 break;
2072 }
2073 case Instruction::Call: {
2076 case Intrinsic::fabs:
2079 return I;
2080 Known.fabs();
2081 break;
2082 case Intrinsic::arithmetic_fence:
2084 return I;
2085 break;
2086 case Intrinsic::copysign: {
2087
2090 return I;
2091
2092 if ((DemandedMask & fcNegative) == DemandedMask) {
2093
2094 I->setOperand(1, ConstantFP::get(VTy, -1.0));
2095 return I;
2096 }
2097
2098 if ((DemandedMask & fcPositive) == DemandedMask) {
2099
2100 I->setOperand(1, ConstantFP::get(VTy, 1.0));
2101 return I;
2102 }
2103
2107 break;
2108 }
2109 default:
2111 break;
2112 }
2113
2114 break;
2115 }
2116 case Instruction::Select: {
2120 return I;
2121
2123 return I->getOperand(2);
2125 return I->getOperand(1);
2126
2127
2128 Known = KnownLHS | KnownRHS;
2129 break;
2130 }
2131 default:
2133 break;
2134 }
2135
2137}
2138
2142 unsigned Depth) {
2143 Use &U = I->getOperandUse(OpNo);
2146 if (!NewVal)
2147 return false;
2150
2152 return true;
2153}
This file provides internal interfaces used to implement the InstCombine.
static cl::opt< unsigned > SimplifyDemandedVectorEltsDepthLimit("instcombine-simplify-vector-elts-depth", cl::desc("Depth limit when simplifying vector instructions and their operands"), cl::Hidden, cl::init(10))
static Constant * getFPClassConstant(Type *Ty, FPClassTest Mask)
For floating-point classes that resolve to a single bit pattern, return that value.
Definition InstCombineSimplifyDemanded.cpp:2008
static cl::opt< bool > VerifyKnownBits("instcombine-verify-known-bits", cl::desc("Verify that computeKnownBits() and " "SimplifyDemandedBits() are consistent"), cl::Hidden, cl::init(false))
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
Definition InstCombineSimplifyDemanded.cpp:123
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
Definition InstCombineSimplifyDemanded.cpp:43
static Value * simplifyShiftSelectingPackedElement(Instruction *I, const APInt &DemandedMask, InstCombinerImpl &IC, unsigned Depth)
Let N = 2 * M.
Definition InstCombineSimplifyDemanded.cpp:70
This file provides the interface for the instcombine pass implementation.
This file contains the declarations for profiling metadata utility functions.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
void clearAllBits()
Set every bit to 0.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
bool isOne() const
Determine if this is a value of 1.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
BinaryOps getOpcode() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isOneValue() const
Returns true if the value is one.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI Value * CreateSelectWithUnknownProfile(Value *C, Value *True, Value *False, StringRef PassName, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Definition InstCombineSimplifyDemanded.cpp:1457
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
Definition InstCombineSimplifyDemanded.cpp:153
std::optional< std::pair< Intrinsic::ID, SmallVector< Value *, 3 > > > convertOrOfShiftsToFunnelShift(Instruction &Or)
Value * simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1, Instruction *Shl, const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known)
Helper routine of SimplifyDemandedUseBits.
Definition InstCombineSimplifyDemanded.cpp:1382
Value * SimplifyDemandedUseBits(Instruction *I, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Attempts to replace I with a simpler value based on the demanded bits.
Definition InstCombineSimplifyDemanded.cpp:222
bool SimplifyDemandedFPClass(Instruction *I, unsigned Op, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth=0)
Definition InstCombineSimplifyDemanded.cpp:2139
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
Definition InstCombineSimplifyDemanded.cpp:145
Value * SimplifyMultipleUseDemandedBits(Instruction *I, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Helper routine of SimplifyDemandedUseBits.
Definition InstCombineSimplifyDemanded.cpp:1206
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth=0)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
Definition InstCombineSimplifyDemanded.cpp:2031
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
A wrapper class for inspecting calls to intrinsic functions.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents the LLVM 'select' instruction.
const Value * getCondition() const
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasUseList() const
Check if this Value has a use-list.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
This class represents zero extension of integer types.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
DisjointOr_match< LHS, RHS, true > m_c_DisjointOr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Merge bits known from context-dependent facts into Known.
decltype(auto) dyn_cast(const From &Val)
dyn_cast - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
gep_type_iterator gep_type_end(const User *GEP)
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
LLVM_ABI FPClassTest fneg(FPClassTest Mask)
Return the test mask which returns true if the value's sign bit is flipped.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
bool isa(const From &Val)
isa - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
constexpr int PoisonMaskElem
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
LLVM_ABI FPClassTest unknown_sign(FPClassTest Mask)
Return the test mask which returns true if the value could have the same set of classes,...
DWARFExpression::Operation Op
constexpr unsigned BitWidth
LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &SQ, unsigned Depth=0)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
decltype(auto) cast(const From &Val)
cast - Return the argument parameter cast to the specified type.
gep_type_iterator gep_type_begin(const User *GEP)
unsigned Log2(Align A)
Returns the log2 of the alignment.
This struct is a compact representation of a valid (non-zero power of two) alignment.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
void copysign(const KnownFPClass &Sign)
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.