LLVM: lib/Transforms/InstCombine/InstCombineAddSub.cpp Source File
//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//

#include <cmath>
#include <utility>
35
36using namespace llvm;
37using namespace PatternMatch;
38
39#define DEBUG_TYPE "instcombine"
40
41namespace {
42
43
44
45
46
47
48
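  // FAddendCoef holds the constant coefficient of one floating-point addend.
  // Small integral coefficients (|C| <= 4, the only values insaneIntVal()
  // accepts) live directly in IntVal; anything else is lazily promoted to an
  // APFloat constructed in-place in FpValBuf.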
49 class FAddendCoef {
50 public:
51
52
53
54
55
56 FAddendCoef() = default;
57 ~FAddendCoef();
58
59
60
    void operator=(const FAddendCoef &A);
    void operator+=(const FAddendCoef &A);
63 void operator*=(const FAddendCoef &S);
64
65 void set(short C) {
66 assert(!insaneIntVal(C) && "Insane coefficient");
67 IsFp = false; IntVal = C;
68 }
69
71
72 void negate();
73
74 bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
76
77 bool isOne() const { return isInt() && IntVal == 1; }
78 bool isTwo() const { return isInt() && IntVal == 2; }
79 bool isMinusOne() const { return isInt() && IntVal == -1; }
80 bool isMinusTwo() const { return isInt() && IntVal == -2; }
81
82 private:
83 bool insaneIntVal(int V) { return V > 4 || V < -4; }
84
85 APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }
86
87 const APFloat *getFpValPtr() const {
88 return reinterpret_cast<const APFloat *>(&FpValBuf);
89 }
90
91 const APFloat &getFpVal() const {
      assert(IsFp && BufHasFpVal && "Incorrect state");
93 return *getFpValPtr();
94 }

    APFloat &getFpVal() {
      assert(IsFp && BufHasFpVal && "Incorrect state");
98 return *getFpValPtr();
99 }
100
101 bool isInt() const { return !IsFp; }
102
103
104
105 void convertToFpType(const fltSemantics &Sem);
106
107
108
109
    APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

112 bool IsFp = false;
113
114
115 bool BufHasFpVal = false;
116
117
118
119
120
121 short IntVal = 0;

    AlignedCharArrayUnion<APFloat> FpValBuf;
124 };
125
126
127
128
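  // FAddend models one addend of a floating-point sum as the pair
  // <Coeff, Val>: a symbolic value scaled by a constant coefficient. A pure
  // constant addend is represented with Val == nullptr.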
129 class FAddend {
130 public:
131 FAddend() = default;

    void operator+=(const FAddend &T) {
      assert((Val == T.Val) && "Symbolic-values disagree");
135 Coeff += T.Coeff;
136 }
137
138 Value *getSymVal() const { return Val; }
139 const FAddendCoef &getCoef() const { return Coeff; }
140
141 bool isConstant() const { return Val == nullptr; }
142 bool isZero() const { return Coeff.isZero(); }
143
144 void set(short Coefficient, Value *V) {
145 Coeff.set(Coefficient);
146 Val = V;
147 }
148 void set(const APFloat &Coefficient, Value *V) {
149 Coeff.set(Coefficient);
150 Val = V;
151 }
    void set(const ConstantFP *Coefficient, Value *V) {
      Coeff.set(Coefficient->getValueAPF());
154 Val = V;
155 }
156
157 void negate() { Coeff.negate(); }
158
159
160
161 static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);
162
163
164
165 unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;
166
167 private:
168 void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }
169
170
171 Value *Val = nullptr;
172 FAddendCoef Coeff;
173 };
174
175
176
177
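  // FAddCombine drives the reassociation of a reassoc+nsz fadd/fsub tree: it
  // breaks the operands into FAddends, merges addends that share a symbolic
  // value, and re-materializes the result only if it fits the instruction
  // quota.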
178 class FAddCombine {
179 public:
    FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}

    Value *simplify(Instruction *FAdd);

  private:
    using AddendVect = SmallVector<const FAddend *, 4>;

187 Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);
188
189
190 Value *createAddendVal(const FAddend &A, bool& NeedNeg);
191
192
193 unsigned calcInstrNumber(const AddendVect& Vect);

    Value *createFSub(Value *Opnd0, Value *Opnd1);
    Value *createFAdd(Value *Opnd0, Value *Opnd1);
    Value *createFMul(Value *Opnd0, Value *Opnd1);
    Value *createFNeg(Value *V);
199 Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
200 void createInstPostProc(Instruction *NewInst, bool NoNumber = false);
201
202
203 #ifndef NDEBUG
204 unsigned CreateInstrNum;
205 void initCreateInstNum() { CreateInstrNum = 0; }
206 void incCreateInstNum() { CreateInstrNum++; }
207 #else
208 void initCreateInstNum() {}
209 void incCreateInstNum() {}
210 #endif

    InstCombiner::BuilderTy &Builder;
    Instruction *Instr = nullptr;
214 };
215
216}
217
//===----------------------------------------------------------------------===//
//
// Implementation of {FAddendCoef, FAddend, FAddCombine}.
//
//===----------------------------------------------------------------------===//

224FAddendCoef::~FAddendCoef() {
225 if (BufHasFpVal)
226 getFpValPtr()->~APFloat();
227}
228
229void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt() || !BufHasFpVal) {
    // The buffer does not hold a live APFloat yet; placement-new one.
    new(P) APFloat(C);
  } else
    *P = C;

239 IsFp = BufHasFpVal = true;
240}
241
242void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
249 else {
250 new(P) APFloat(Sem, 0 - IntVal);
251 P->changeSign();
252 }
253 IsFp = BufHasFpVal = true;
254}
255
256APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
257 if (Val >= 0)
258 return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();
262
263 return T;
264}
265
266void FAddendCoef::operator=(const FAddendCoef &That) {
267 if (That.isInt())
268 set(That.IntVal);
269 else
270 set(That.getFpVal());
271}
272
273void FAddendCoef::operator+=(const FAddendCoef &That) {
274 RoundingMode RndMode = RoundingMode::NearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
278 else
279 getFpVal().add(That.getFpVal(), RndMode);
280 return;
281 }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
285 convertToFpType(T.getSemantics());
286 getFpVal().add(T, RndMode);
287 return;
288 }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
292}
293
294void FAddendCoef::operator*=(const FAddendCoef &That) {
295 if (That.isOne())
296 return;
297
298 if (That.isMinusOne()) {
299 negate();
300 return;
301 }
302
303 if (isInt() && That.isInt()) {
304 int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
308 }

  const fltSemantics &Semantic =
      isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
315 APFloat &F0 = getFpVal();
316
317 if (That.isInt())
318 F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
319 APFloat::rmNearestTiesToEven);
320 else
321 F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
322}
323
324void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
328 getFpVal().changeSign();
329}
330
331Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
      ConstantFP::get(Ty, float(IntVal)) :
      ConstantFP::get(Ty->getContext(), getFpVal());
335}
336
// Decompose <Val> into at most two addends:
//
//   The definition of <Val>     Addends
//   =========================================
//    A + B                      <1, A>, <1, B>
//    A - B                      <1, A>, <-1, B>
//    0 - B                      <-1, B>
//    C * A                      <C, A>
//    A + C                      <1, A>, <C, NULL>
//    0 +/- 0                    <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is a constant.
347unsigned FAddend::drillValueDownOneStep
348 (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
351 return 0;
352
353 unsigned Opcode = I->getOpcode();
354
  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0 = nullptr, *C1 = nullptr;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
360 Opnd0 = nullptr;
361
    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
363 Opnd1 = nullptr;
364
365 if (Opnd0) {
366 if (!C0)
367 Addend0.set(1, Opnd0);
368 else
369 Addend0.set(C0, nullptr);
370 }
371
372 if (Opnd1) {
373 FAddend &Addend = Opnd0 ? Addend1 : Addend0;
374 if (!C1)
375 Addend.set(1, Opnd1);
376 else
377 Addend.set(C1, nullptr);
378 if (Opcode == Instruction::FSub)
379 Addend.negate();
380 }
381
382 if (Opnd0 || Opnd1)
383 return Opnd0 && Opnd1 ? 2 : 1;
384

    // Both operands are zero: represent the whole expression as <0, NULL>.
    Addend0.set(APFloat(0.0f), nullptr);
    return 1;
388 }
389
390 if (I->getOpcode() == Instruction::FMul) {
391 Value *V0 = I->getOperand(0);
392 Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
394 Addend0.set(C, V1);
395 return 1;
396 }
397
    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
399 Addend0.set(C, V0);
400 return 1;
401 }
402 }
403
404 return 0;
405}
406
407
408
409
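// Break this addend's symbolic value down one more level, scaling the
// resulting sub-addends by this addend's own coefficient.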
410unsigned FAddend::drillAddendDownOneStep
411 (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;
414
415 unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
416 if (!BreakNum || Coeff.isOne())
417 return BreakNum;
418
419 Addend0.Scale(Coeff);
420
421 if (BreakNum == 2)
422 Addend1.Scale(Coeff);
423
424 return BreakNum;
425}

Value *FAddCombine::simplify(Instruction *I) {
428 assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
429 "Expected 'reassoc'+'nsz' instruction");
430
431
432 if (I->getType()->isVectorTy())
433 return nullptr;
434
435 assert((I->getOpcode() == Instruction::FAdd ||
436 I->getOpcode() == Instruction::FSub) && "Expect add/sub");
437
438
  // Remember the instruction being simplified; the helpers below rely on it.
  Instr = I;

441 FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;
442
443 unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);
444
445
446 unsigned Opnd0_ExpNum = 0;
447 unsigned Opnd1_ExpNum = 0;
448
449 if (!Opnd0.isConstant())
450 Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);
451
452
453 if (OpndNum == 2 && !Opnd1.isConstant())
454 Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);
455
456
457 if (Opnd0_ExpNum && Opnd1_ExpNum) {
458 AddendVect AllOpnds;
459 AllOpnds.push_back(&Opnd0_0);
460 AllOpnds.push_back(&Opnd1_0);
461 if (Opnd0_ExpNum == 2)
462 AllOpnds.push_back(&Opnd0_1);
463 if (Opnd1_ExpNum == 2)
464 AllOpnds.push_back(&Opnd1_1);
465
466
467 unsigned InstQuota = 0;
468
469 Value *V0 = I->getOperand(0);
470 Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;
473
474 if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
475 return R;
476 }
477
478 if (OpndNum != 2) {
479
480
481
482
483 const FAddendCoef &CE = Opnd0.getCoef();
484 return CE.isOne() ? Opnd0.getSymVal() : nullptr;
485 }
486
487
488 if (Opnd1_ExpNum) {
489 AddendVect AllOpnds;
490 AllOpnds.push_back(&Opnd0);
491 AllOpnds.push_back(&Opnd1_0);
492 if (Opnd1_ExpNum == 2)
493 AllOpnds.push_back(&Opnd1_1);
494
495 if (Value *R = simplifyFAdd(AllOpnds, 1))
496 return R;
497 }
498
499
500 if (Opnd0_ExpNum) {
501 AddendVect AllOpnds;
502 AllOpnds.push_back(&Opnd1);
503 AllOpnds.push_back(&Opnd0_0);
504 if (Opnd0_ExpNum == 2)
505 AllOpnds.push_back(&Opnd0_1);
506
507 if (Value *R = simplifyFAdd(AllOpnds, 1))
508 return R;
509 }
510
511 return nullptr;
512}
513
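// Merge addends that share a symbolic value by summing their coefficients,
// then turn the shortened list back into instructions if the result fits
// within InstrQuota.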
514Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
515 unsigned AddendNum = Addends.size();
516 assert(AddendNum <= 4 && "Too many addends");
517
518
519 unsigned NextTmpIdx = 0;
520 FAddend TmpResult[3];
521
522
523 AddendVect SimpVect;
524
525
526
527
528 for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {
529
530 const FAddend *ThisAddend = Addends[SymIdx];
531 if (!ThisAddend) {
532
533 continue;
534 }
535
536 Value *Val = ThisAddend->getSymVal();
537
538
539
540
541
542
543
544
545 unsigned StartIdx = SimpVect.size();
546 SimpVect.push_back(ThisAddend);
547
548
549
550
551
552
553 for (unsigned SameSymIdx = SymIdx + 1;
554 SameSymIdx < AddendNum; SameSymIdx++) {
555 const FAddend *T = Addends[SameSymIdx];
556 if (T && T->getSymVal() == Val) {
557
558
559 Addends[SameSymIdx] = nullptr;
560 SimpVect.push_back(T);
561 }
562 }
563
564
565 if (StartIdx + 1 != SimpVect.size()) {
566 FAddend &R = TmpResult[NextTmpIdx ++];
567 R = *SimpVect[StartIdx];
568 for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
569 R += *SimpVect[Idx];
570
571
572 SimpVect.resize(StartIdx);
      if (!R.isZero()) {
574 SimpVect.push_back(&R);
575 }
576 }
577 }
578
579 assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access");
580
  Value *Result;
  if (!SimpVect.empty())
583 Result = createNaryFAdd(SimpVect, InstrQuota);
584 else {
585
586 Result = ConstantFP::get(Instr->getType(), 0.0);
587 }

  return Result;
590}
591
592Value *FAddCombine::createNaryFAdd
593 (const AddendVect &Opnds, unsigned InstrQuota) {
594 assert(!Opnds.empty() && "Expect at least one addend");
595
596
597
598 unsigned InstrNeeded = calcInstrNumber(Opnds);
599 if (InstrNeeded > InstrQuota)
600 return nullptr;
601
602 initCreateInstNum();
603
604
605
606
607
608
609
610
611
612 Value *LastVal = nullptr;
613 bool LastValNeedNeg = false;
614
615
616 for (const FAddend *Opnd : Opnds) {
617 bool NeedNeg;
618 Value *V = createAddendVal(*Opnd, NeedNeg);
619 if (!LastVal) {
620 LastVal = V;
621 LastValNeedNeg = NeedNeg;
622 continue;
623 }
624
625 if (LastValNeedNeg == NeedNeg) {
626 LastVal = createFAdd(LastVal, V);
627 continue;
628 }
629
630 if (LastValNeedNeg)
631 LastVal = createFSub(V, LastVal);
632 else
633 LastVal = createFSub(LastVal, V);
634
635 LastValNeedNeg = false;
636 }
637
638 if (LastValNeedNeg) {
639 LastVal = createFNeg(LastVal);
640 }
641
642#ifndef NDEBUG
643 assert(CreateInstrNum == InstrNeeded &&
644 "Inconsistent in instruction numbers");
645#endif
646
647 return LastVal;
648}
649
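// The helpers below emit a single instruction through the IR builder and
// funnel it through createInstPostProc so the created-instruction bookkeeping
// stays consistent with calcInstrNumber's estimate.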
650Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
651 Value *V = Builder.CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
653 createInstPostProc(I);
654 return V;
655}
656
657Value *FAddCombine::createFNeg(Value *V) {
658 Value *NewV = Builder.CreateFNeg(V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
660 createInstPostProc(I, true);
661 return NewV;
662}
663
664Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
665 Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
667 createInstPostProc(I);
668 return V;
669}
670
671Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
672 Value *V = Builder.CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
674 createInstPostProc(I);
675 return V;
676}
677
678void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
682 if (!NoNumber)
683 incCreateInstNum();

  // Propagate fast-math flags from the instruction being combined.
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
687}
688
689
690
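// Return the number of instructions needed to materialize the given addends:
// one add/sub per pair of operands, plus one fmul for every addend whose
// coefficient is neither 1 nor -1.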
691unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
692 unsigned OpndNum = Opnds.size();
693 unsigned InstrNeeded = OpndNum - 1;
694
695
696 for (const FAddend *Opnd : Opnds) {
697 if (Opnd->isConstant())
698 continue;
699
700
701
    if (isa<ConstantFP>(Opnd->getSymVal()))
703 continue;
704
705 const FAddendCoef &CE = Opnd->getCoef();
706
707
708
    if (!CE.isMinusOne() && !CE.isOne())
710 InstrNeeded++;
711 }
712 return InstrNeeded;
713}
714
715
716
717
718
719
720
721
722
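// Convert one addend into a Value. NeedNeg tells the caller whether it must
// negate the result: coefficients -1 and -2 are materialized through their
// cheaper positive forms.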
723Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
724 const FAddendCoef &Coeff = Opnd.getCoef();
725
726 if (Opnd.isConstant()) {
727 NeedNeg = false;
728 return Coeff.getValue(Instr->getType());
729 }
730
731 Value *OpndVal = Opnd.getSymVal();
732
733 if (Coeff.isMinusOne() || Coeff.isOne()) {
734 NeedNeg = Coeff.isMinusOne();
735 return OpndVal;
736 }
737
738 if (Coeff.isTwo() || Coeff.isMinusTwo()) {
739 NeedNeg = Coeff.isMinusTwo();
740 return createFAdd(OpndVal, OpndVal);
741 }
742
743 NeedNeg = false;
744 return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
745}
746
747
748
749
750
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
754 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
755
756
757
759 return nullptr;
760
761 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
762 const APInt *C1 = nullptr, *C2 = nullptr;
763
764
767
769
772
774
775
778 return Builder.CreateSub(RHS, NewAnd, "sub");
780
781
784 }
785 }
786 }
787
788
791
792
795
796
797
798
804 }
805 return nullptr;
806}
807
// Wrapping flags may allow combining constants separated by an extend.
static Instruction *foldNoWrapAdd(BinaryOperator &Add,
                                  InstCombiner::BuilderTy &Builder) {
811 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
815 return nullptr;
816
817
818
820 const APInt *C1, *C2;
825
828
831 Builder.CreateNUWAdd(X, ConstantInt::get(X->getType(), NewC)), Ty);
832 }
833
834
835
836
843 return BinaryOperator::CreateAdd(WideX, NewC);
844 }
845
851 return BinaryOperator::CreateAdd(WideX, NewC);
852 }
853 return nullptr;
854}

Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
857 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
861 return nullptr;
862
864 return NV;
865
868
869
872
874
875
879
880
882 X->getType()->getScalarSizeInBits() == 1)
884
886 X->getType()->getScalarSizeInBits() == 1)
888
889
891
892 auto *COne = ConstantInt::get(Op1C->getType(), 1);
893 bool WillNotSOV = willNotOverflowSignedSub(Op1C, COne, Add);
897 return Res;
898 }
899
900
907
909 return nullptr;
910
911
917 willNotOverflowSignedAdd(Op01C, Op1C, Add));
919 return NewAdd;
920 }
921
922
925 return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2));
926
927 if (C->isSignMask()) {
928
929
930 if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
931 return BinaryOperator::CreateOr(Op0, Op1);
932
933
934
935 return BinaryOperator::CreateXor(Op0, Op1);
936 }
937
938
939
943
945
947 return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C2 ^ *C));
948
949
950
953 if ((*C2 | LHSKnown.Zero).isAllOnes())
954 return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);
955 }
956
957
958
959
960
961 if (Op0->hasOneUse() && *C2 == -(*C)) {
963 unsigned ShAmt = 0;
964 if (C->isPowerOf2())
965 ShAmt = BitWidth - C->logBase2() - 1;
969 0, &Add)) {
970 Constant *ShAmtC = ConstantInt::get(Ty, ShAmt);
972 return BinaryOperator::CreateAShr(NewShl, ShAmtC);
973 }
974 }
975 }
976
977 if (C->isOne() && Op0->hasOneUse()) {
978
979
980
981
982
984 X->getType()->getScalarSizeInBits() == 1)
986
987
988
993 return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
994 }
995 }
996
997
1001 Intrinsic::usub_sat, X, ConstantInt::get(Add.getType(), -*C)));
1002
1003
1004
1005 if (C->isOne()) {
1010 }
1011 }
1012
1013 return nullptr;
1014}
1015
1016
1017
1018
1019
1020
1021
1022
1023
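// Match the expansion of a squared sum: A*A + B*B plus the 2*A*B cross term
// (emitted as a shift by one in the integer case, or as an fmul by 2.0 in the
// FP case), so callers can fold the whole expression to (A + B) * (A + B).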
1024template <bool FP, typename Mul2Rhs>
1027 constexpr unsigned MulOp = FP ? Instruction::FMul : Instruction::Mul;
1028 constexpr unsigned AddOp = FP ? Instruction::FAdd : Instruction::Add;
1029 constexpr unsigned Mul2Op = FP ? Instruction::FMul : Instruction::Shl;
1030
1031
1035 MulOp,
1039 return true;
1040
1041
1042
1043
1046 AddOp,
1055}
1056
Instruction *InstCombinerImpl::foldSquareSumInt(BinaryOperator &I) {
  Value *A, *B;
1060 if (matchesSquareSum</*FP*/ false>(I, m_SpecificInt(1), A, B)) {
    Value *AB = Builder.CreateAdd(A, B);
    return BinaryOperator::CreateMul(AB, AB);
1063 }
1064 return nullptr;
1065}
1066
1067
1068
Instruction *InstCombinerImpl::foldSquareSumFP(BinaryOperator &I) {
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && "Assumption mismatch");
  Value *A, *B;
  if (matchesSquareSum</*FP*/ true>(I, m_SpecificFP(2.0), A, B)) {
    Value *AB = Builder.CreateFAddFMF(A, B, &I);
    return BinaryOperator::CreateFMulFMF(AB, AB, &I);
  }
1076 return nullptr;
1077}
1078
1079
1080
// Match a multiplication expression E = Op * C, returning the constant in C
// and the other operand in Op.
static bool MatchMul(Value *E, Value *&Op, APInt &C) {
1083 const APInt *AI;
1085 C = *AI;
1086 return true;
1087 }
1090 C <<= *AI;
1091 return true;
1092 }
1093 return false;
1094}
1095
1096
1097
1098
// Match a remainder expression E = Op % C, returning the constant in C and
// whether the remainder is signed in IsSigned.
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
1101 const APInt *AI;
1102 IsSigned = false;
1104 IsSigned = true;
1105 C = *AI;
1106 return true;
1107 }
1109 C = *AI;
1110 return true;
1111 }
1113 C = *AI + 1;
1114 return true;
1115 }
1116 return false;
1117}
1118
1119
1120
// Match a division expression E = Op / C, returning the constant divisor in C.
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
1123 const APInt *AI;
1125 C = *AI;
1126 return true;
1127 }
1128 if (!IsSigned) {
1130 C = *AI;
1131 return true;
1132 }
1135 C <<= *AI;
1136 return true;
1137 }
1138 }
1139 return false;
1140}
1141
// Return true if C0 * C1 would overflow in the given signedness.
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
1144 bool overflow;
1145 if (IsSigned)
1146 (void)C0.smul_ov(C1, overflow);
1147 else
1148 (void)C0.umul_ov(C1, overflow);
1149 return overflow;
1150}
1151
1152
1153
1154
// Tries to simplify add operations using the definition of remainder:
// x % y == x - (x / y) * y.
Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) {
1157 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1159 APInt C0, MulOpC;
1160 bool IsSigned;
1161
1164 C0 == MulOpC) {
1167 bool Rem2IsSigned;
1168
1169 if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
1170 IsSigned == Rem2IsSigned) {
1173
1174 if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
1176 Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1);
1179 }
1180 }
1181 }
1182
1183
1184 Value *Div, *Rem;
1187 Div = LHS, C1 = APInt(I.getType()->getScalarSizeInBits(), 1);
1189 Rem = RHS, C2 = APInt(I.getType()->getScalarSizeInBits(), 1);
1193 }
1196 if (MatchRem(Rem, X, C0, IsSigned) &&
1197 MatchDiv(Div, DivOpV, DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC) {
1198 APInt NewC = C1 - C2 * C0;
1200 return nullptr;
1202 return nullptr;
1205 return MulXC2;
1207 Builder.CreateMul(Div, ConstantInt::get(X->getType(), NewC)), MulXC2);
1208 }
1209
1210 return nullptr;
1211}
1212
1213
1214
1215
1216
1217
// Fold (1 << NBits) - 1 into ~(-(1 << NBits)): a 'not' is friendlier to
// bit-tracking analysis and other transforms than an add of all-ones.
static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *NBits;
  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
1223 return nullptr;

  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
1226 Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
1227
  if (auto *BOp = dyn_cast<Instruction>(NotMask)) {
1229
1230 BOp->setHasNoSignedWrap();
1231 BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
1232 }

  return BinaryOperator::CreateNot(NotMask);
1235}

static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
1238 assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
  Type *Ty = I.getType();
  auto getUAddSat = [&]() {
1242 Ty);
1243 };
1244
1245
1250
1251
1254 *C == ~*NotC)
1255 return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });
1256
1257 return nullptr;
1258}
1259
1260
1261
1262
1270 return BinaryOperator::CreateSub(A, NewShl);
1271 }
1272 return nullptr;
1273}
1274
1275
1277
1279 const APInt *DivC;
1282 return nullptr;
1283
1284
1285
1286
1287
1288
1289
1290
1291 const APInt *MaskC, *MaskCCmp;
1293 if ((Add.getOperand(1),
1296 return nullptr;
1297
1300 return nullptr;
1301
1304 ? (*MaskC == (SMin | (*DivC - 1)))
1305 : (*DivC == 2 && *MaskC == SMin + 1);
1306 if (!IsMaskValid)
1307 return nullptr;
1308
1309
1310 return BinaryOperator::CreateAShr(
1312}
1313
// Common transforms for add / disjoint or.
Instruction *InstCombinerImpl::foldAddLikeCommutative(Value *LHS, Value *RHS,
                                                      bool NSW, bool NUW) {
1319 Instruction *R = BinaryOperator::CreateSub(C, B);
1322
1325 R->setHasNoSignedWrap(NSWOut);
1326 R->setHasNoUnsignedWrap(NUWOut);
1327 return R;
1328 }
1329
1330
1331 const APInt *C1, *C2;
1334 APInt MinusC1 = -(*C1);
1335 if (MinusC1 == (One << *C2)) {
1337 return BinaryOperator::CreateSRem(RHS, NewRHS);
1338 }
1339 }
1340
1341 return nullptr;
1342}
1343
1347 assert((I.getOpcode() == Instruction::Add ||
1348 I.getOpcode() == Instruction::Or ||
1349 I.getOpcode() == Instruction::Sub) &&
1350 "Expecting add/or/sub instruction");
1351
1352
1353
1360 return nullptr;
1361
1362
1363 if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)
1364 return nullptr;
1365
1366 Type *XTy = X->getType();
1367 bool HadTrunc = I.getType() != XTy;
1368
1369
1370
1372 return nullptr;
1373
1374
1375
1376
1377
1379 if ((LowBitsToSkip,
1382 return nullptr;
1383
1384
1385
1386 auto SkipExtInMagic = [&I](Value *&V) {
1387 if (I.getOpcode() == Instruction::Sub)
1389 else
1391 };
1392
1393
1394
1395 SkipExtInMagic(Select);
1396
1398 const APInt *Thr;
1399 Value *SignExtendingValue, *Zero;
1400 bool ShouldSignext;
1401
1402
1403
1407 return nullptr;
1408
1409
1410 if (!ShouldSignext)
1411 std::swap(SignExtendingValue, Zero);
1412
1413
1415 return nullptr;
1416
1417
1418
1419 SkipExtInMagic(SignExtendingValue);
1420 Constant *SignExtendingValueBaseConstant;
1421 if ((SignExtendingValue,
1424 return nullptr;
1425
1426 if (I.getOpcode() == Instruction::Sub
1427 ? (SignExtendingValueBaseConstant, m_One())
1428 : (SignExtendingValueBaseConstant, m_AllOnes()))
1429 return nullptr;
1430
1431 auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,
1432 Extract->getName() + ".sext");
1433 NewAShr->copyIRFlags(Extract);
1434 if (!HadTrunc)
1435 return NewAShr;
1436
1439}
1440
1441
1442
1443
// This is a specialization of a more general transform from
// foldUsingDistributiveLaws: factor a common shl out of add/sub, e.g.
// (X << Z) + (Y << Z) --> (X + Y) << Z.
static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
                                            InstCombiner::BuilderTy &Builder) {
1447 assert((I.getOpcode() == Instruction::Add ||
1448 I.getOpcode() == Instruction::Sub) &&
1449 "Expected add/sub");
  auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
  auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
1452 if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
1453 return nullptr;
1454
1458 return nullptr;
1459
1460
1461 bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
1462 Op1->hasNoSignedWrap();
1463 bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
1464 Op1->hasNoUnsignedWrap();
1465
1466
1468 if (auto *NewI = dyn_cast(NewMath)) {
1469 NewI->setHasNoSignedWrap(HasNSW);
1470 NewI->setHasNoUnsignedWrap(HasNUW);
1471 }
1472 auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
1473 NewShl->setHasNoSignedWrap(HasNSW);
1474 NewShl->setHasNoUnsignedWrap(HasNUW);
1475 return NewShl;
1476}
1477
1478
// Reduce a sequence of masked half-width multiplies to a single multiply:
// (XLow * YLow) + ((XLow * YHigh + XHigh * YLow) << HalfBits) --> X * Y.
static Instruction *foldBoxMultiply(BinaryOperator &I) {
1481 unsigned BitWidth = I.getType()->getScalarSizeInBits();
1482
1484 return nullptr;
1485
1486 unsigned HalfBits = BitWidth >> 1;
1488
1489
1490 Value *XLo, *YLo;
1491 Value *CrossSum;
1492
1493
1496 return nullptr;
1497
1498
1499
1500
1501
1505 return nullptr;
1506
1507
1508
1509 if (match(CrossSum,
1514 return BinaryOperator::CreateMul(X, Y);
1515
1516 return nullptr;
1517}
1518
1521 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
1524
1526 return &I;
1527
1529 return X;
1530
1532 return Phi;
1533
1534
1537
1539 return R;
1540
1542 return R;
1543
1545 return X;
1546
1548 return X;
1549
1551 return R;
1552
1554 return R;
1555
1556 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1558 I.hasNoUnsignedWrap()))
1559 return R;
1561 I.hasNoUnsignedWrap()))
1562 return R;
1565 return BinaryOperator::CreateXor(LHS, RHS);
1566
  // X + X --> X << 1
  if (LHS == RHS) {
1569 auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
1570 Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
1571 Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
1572 return Shl;
1573 }
1574
1577
1580
1581
1582 auto *Sub = BinaryOperator::CreateSub(RHS, A);
1583 auto *OB0 = cast(LHS);
1584 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OB0->hasNoSignedWrap());
1585
1586 return Sub;
1587 }
1588
1589
1591 auto *Sub = BinaryOperator::CreateSub(LHS, B);
1592 auto *OBO = cast(RHS);
1593 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO->hasNoSignedWrap());
1594 return Sub;
1595 }
1596
1599
1600
1601
1602
1603
1606 return BinaryOperator::CreateSub(A, B);
1607
1608
1610 return BinaryOperator::CreateAdd(A, Builder.CreateShl(RHS, 1, "reass.add"));
1611
1612
1614 return BinaryOperator::CreateAdd(A, Builder.CreateShl(LHS, 1, "reass.add"));
1615
1616 {
1617
1624 }
1625
1626
1627
1631 return BinaryOperator::CreateAdd(Sub, C1);
1632 }
1633 }
1634
1635
1637
1638 const APInt *C1;
1639
1643 return BinaryOperator::CreateAnd(A, NewMask);
1644 }
1645
1646
1652
1653
1655 A->getType()->isIntOrIntVectorTy(1))
1657
1658
1663 A->getType()->isIntOrIntVectorTy()) {
1667 }
1668
1673 Ty,
1675 {A, B}));
1676 }
1677
1678
1681 return BinaryOperator::CreateDisjointOr(LHS, RHS);
1682
1683 if (Instruction *Ext = narrowMathIfNoOverflow(I))
1684 return Ext;
1685
1686
1687
1690 return BinaryOperator::CreateOr(A, B);
1691
1692
1693
1696
1699 return &I;
1700 }
1701
1702
1703
1704
1705
1710 I.hasNoUnsignedWrap(), I.hasNoSignedWrap());
1711 return BinaryOperator::CreateAnd(Add, A);
1712 }
1713
1714
1715
1722 return BinaryOperator::CreateAnd(Dec, Not);
1723 }
1724
1725
1726
1727
1728
1729
1734 Constant *NewMulC = ConstantInt::get(Ty, 1 - *C1);
1737 }
1738
1739
1740 const APInt *NegPow2C;
1745 return BinaryOperator::CreateSub(B, Shl);
1746 }
1747
1748
1749
1756 return BinaryOperator::CreateOr(LHS, Zext);
1757 }
1758
1759 {
1762
1763
1767
1771 Ext->hasOneUse()) {
1775 }
1776 }
1777
1778
1782 Value *OneConst = ConstantInt::get(A->getType(), 1);
1785 }
1786
1788 return Ashr;
1789
1790
1791 {
1792
1793
1794 bool ConsumesLHS, ConsumesRHS;
1799 assert(NotLHS != nullptr && NotRHS != nullptr &&
1800 "isFreeToInvert desynced with getFreelyInverted");
1802 return BinaryOperator::CreateSub(
1804 }
1805 }
1806
1808 return R;
1809
1810
1811
1812
1813 bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHSCache, RHSCache, I)) {
1815 Changed = true;
1816 I.setHasNoSignedWrap(true);
1817 }
  if (!I.hasNoUnsignedWrap() &&
1819 willNotOverflowUnsignedAdd(LHSCache, RHSCache, I)) {
1820 Changed = true;
1821 I.setHasNoUnsignedWrap(true);
1822 }
1823
1825 return V;
1826
1829 return V;
1830
1832 return SatAdd;
1833
1834
1840 }
1841
1842
1848 {Builder.CreateOr(A, B)}));
1849
1850
1851
1852
1853
1854 const APInt *XorC;
1865 *XorC == A->getType()->getScalarSizeInBits() - 1) {
1870 ConstantInt::get(A->getType(), A->getType()->getScalarSizeInBits()),
1871 Ctlz, "", true, true);
1873 }
1874
1876 return Res;
1877
1878 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
1879 return Res;
1880
1882 return Res;
1883
1884
1885
1886 if (Changed) {
1888 Value *Start, *Step;
1891 }
1892
1893 return Changed ? &I : nullptr;
1894}
1895
1896
1904 return nullptr;
1905
1906
1910}
1911
1912
1915 assert((I.getOpcode() == Instruction::FAdd ||
1916 I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
1917 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
1918 "FP factorization requires FMF");
1919
1921 return Lerp;
1922
1923 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1924 if (!Op0->hasOneUse() || !Op1->hasOneUse())
1925 return nullptr;
1926
1928 bool IsFMul;
1933 IsFMul = true;
1936 IsFMul = false;
1937 else
1938 return nullptr;
1939
1940
1941
1942
1943
1944 bool IsFAdd = I.getOpcode() == Instruction::FAdd;
1947
1948
1949
1952 return nullptr;
1953
1956}

Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
  if (Value *V = simplifyFAddInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
1965 return &I;
1966
  if (Instruction *X = foldVectorBinop(I))
    return X;
1969
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;
1972
1974 return FoldedFAdd;
1975
1976
1980
1981
1982
1988 }
1989
1990
1997 }
1998
1999
2000
2001 if (Instruction *R = foldFBinOpOfIntCasts(I))
2002 return R;
2003
2004 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2005
2008
2009 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
2011 return F;
2012
2014 return F;
2015
2016
2020
2023 {X->getType()}, {Y, X}, &I));
2024 }
  if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
2029
2030 Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC);
2033 {X->getType()}, {NewStartC, X}, &I));
2034 }
2035
2036
2041 Instruction::FAdd, MulC, ConstantFP::get(I.getType(), 1.0), DL))
2043 }
2044
2045
2049
2052 }
2053
2054
                m_c_Intrinsic<Intrinsic::minimum>(m_Deferred(X),
2060
2061
2062
2063 if (!Result->hasNoNaNs())
2064 Result->setHasNoInfs(false);
2065 return Result;
2066 }
2067
2068 return nullptr;
2069}
2070
2071
2072
2073
// Optimize pointer differences into the same array into a size.
Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
                                                   Type *Ty, bool IsNUW) {
2076
2077
2078 bool Swapped = false;
2079 GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;
  if (!isa<GEPOperator>(LHS) && isa<GEPOperator>(RHS)) {
    std::swap(LHS, RHS);
    Swapped = true;
2083 }
2084
2085
  if (auto *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
2087
    if (LHSGEP->getOperand(0)->stripPointerCasts() ==
        RHS->stripPointerCasts()) {
      GEP1 = LHSGEP;
    } else if (auto *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
2092
2093 if (LHSGEP->getOperand(0)->stripPointerCasts() ==
2094 RHSGEP->getOperand(0)->stripPointerCasts()) {
2095 GEP1 = LHSGEP;
2096 GEP2 = RHSGEP;
2097 }
2098 }
2099 }
2100
2101 if (!GEP1)
2102 return nullptr;
2103
2104
2105
2106
2107
2108 bool RewriteGEPs = GEP2 != nullptr;
2109
2110
2112 Value *Result = EmitGEPOffset(GEP1, RewriteGEPs);
2113
2114
2115
  if (auto *I = dyn_cast<Instruction>(Result))
2117 if (IsNUW && !GEP2 && !Swapped && GEP1NW.isInBounds() &&
2118 I->getOpcode() == Instruction::Mul)
2119 I->setHasNoUnsignedWrap();
2120
2121
2122
2123
2124 if (GEP2) {
2126 Value *Offset = EmitGEPOffset(GEP2, RewriteGEPs);
2131 }
2132
2133
2134 if (Swapped)
2136
2138}
2139
static Instruction *foldSubOfMinMax(BinaryOperator &I,
                                    InstCombiner::BuilderTy &Builder) {
  Value *Op0 = I.getOperand(0);
2143 Value *Op1 = I.getOperand(1);
  auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op1);
2147 return nullptr;
2148
2149
2150
2158 }
2159
2160
2161
2166 return BinaryOperator::CreateAdd(X, USub);
2167 }
2170 return BinaryOperator::CreateAdd(X, USub);
2171 }
2172 }
2173
2174
2175
2181 }
2182
2183 return nullptr;
2184}
2185
Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
  if (Value *V = simplifySubInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
2193 return X;
2194
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;
2197
2198 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2199
2200
2201
2202 if (Value *V = dyn_castNegVal(Op1)) {
2203 BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
2204
    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
2206 assert(BO->getOpcode() == Instruction::Sub &&
2207 "Expected a subtraction operator!");
2208 if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
2210 } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
2213 }
2214
2215 return Res;
2216 }
2217
2218
2220 return R;
2221
2226
2227
2229
2230
2231 bool WillNotSOV = willNotOverflowSignedSub(C, C2, I);
    auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2235 Res->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO1->hasNoSignedWrap() &&
2236 WillNotSOV);
2238 OBO1->hasNoUnsignedWrap());
2239 return Res;
2240 }
2241 }
2242
2243 auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {
2244 if (Instruction *Ext = narrowMathIfNoOverflow(I))
2245 return Ext;
2246
2247 bool Changed = false;
    if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
2249 Changed = true;
2250 I.setHasNoSignedWrap(true);
2251 }
    if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
2253 Changed = true;
2254 I.setHasNoUnsignedWrap(true);
2255 }
2256
2257 return Changed ? &I : nullptr;
2258 };
2259
2260
2261
2262
2264 if (!IsNegation || none_of(I.users(), [&I, Op1](const User *U) {
        const Instruction *UI = dyn_cast<Instruction>(U);
2266 if (!UI)
2267 return false;
2268 return match(UI, m_c_Select(m_Specific(Op1), m_Specific(&I)));
2269 })) {
2271 I.hasNoSignedWrap(),
2272 Op1, *this))
2273 return BinaryOperator::CreateAdd(NegOp1, Op0);
2274 }
2275 if (IsNegation)
2276 return TryToNarrowDeduceFlags();
2277
2278
2281
2282 if (I.getType()->isIntOrIntVectorTy(1))
2283 return BinaryOperator::CreateXor(Op0, Op1);
2284
2285
2288
2289
2293
2294
2300 return BinaryOperator::CreateAnd(
2302 }
2303
2304
2305
2306
2312 return BinaryOperator::CreateSub(XZ, YW);
2313 }
2314
2315
2319 bool HasNSW = HasNUW && I.hasNoSignedWrap() && LHSSub->hasNoSignedWrap();
2321 HasNSW);
2325 return Sub;
2326 }
2327
2328 {
2329
2330
2331
2332
2335 return BinaryOperator::CreateSub(X, Y);
2336
2337
2343 return BinaryOperator::CreateAdd(OpsSub, ConstsSub);
2344 }
2345 }
2346
2347 {
2352 if (W == Y)
2353 R = BinaryOperator::CreateSub(X, Z);
2354 else if (W == Z)
2355 R = BinaryOperator::CreateSub(X, Y);
2357 R = BinaryOperator::CreateSub(W, Z);
2358 else if (X == Z)
2359 R = BinaryOperator::CreateSub(W, Y);
2360 if (R) {
2361 bool NSW = I.hasNoSignedWrap() &&
2364
2365 bool NUW = I.hasNoUnsignedWrap() &&
2367 R->setHasNoSignedWrap(NSW);
2368 R->setHasNoUnsignedWrap(NUW);
2369 return R;
2370 }
2371 }
2372 }
2373
2374
2375 {
2376
2377
2378 bool ConsumesOp0, ConsumesOp1;
2380 isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
2381 (ConsumesOp0 || ConsumesOp1)) {
2384 assert(NotOp0 != nullptr && NotOp1 != nullptr &&
2385 "isFreeToInvert desynced with getFreelyInverted");
2386 return BinaryOperator::CreateSub(NotOp1, NotOp0);
2387 }
2388 }
2389
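  // sub of two integer add-reductions folds to a reduction of the
  // element-wise difference:
  //   vector_reduce_add(V0) - vector_reduce_add(V1)
  //     --> vector_reduce_add(V0 - V1)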
2390 auto m_AddRdx = [](Value *&Vec) {
    return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec)));
2392 };
2394 if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&
2396
2397
2400 {Sub->getType()}, {Sub});
2402 }
2403
  if (Constant *C = dyn_cast<Constant>(Op0)) {
2407
2410
2412
2413
2416
2417
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2420 return R;
2421
2422
    if (PHINode *PN = dyn_cast<PHINode>(Op1))
2425 return R;
2426
2428
2429
2432 }
2433
2434 const APInt *Op0C;
2436 if (Op0C->isMask()) {
2437
2438
2439
2442 if ((*Op0C | RHSKnown.Zero).isAllOnes())
2443 return BinaryOperator::CreateXor(Op1, Op0);
2444 }
2445
2446
2447
2448
2449
2450 const APInt *C2, *C3;
2455 APInt C2AndC3 = *C2 & *C3;
2456 APInt C2AndC3Minus1 = C2AndC3 - 1;
2457 APInt C2AddC3 = *C2 + *C3;
2458 if ((*C3 - C2AndC3Minus1).isPowerOf2() &&
2459 C2AndC3Minus1.isSubsetOf(C2AddC3)) {
2461 return BinaryOperator::CreateAdd(
2462 And, ConstantInt::get(I.getType(), *Op0C - C2AndC3));
2463 }
2464 }
2465 }
2466
2467 {
2469
2472
2473
2476 }
2477
2478
2479 {
2483 return BinaryOperator::CreateXor(A, B);
2484 }
2485
2486
2487 {
2491 return BinaryOperator::CreateAnd(A, B);
2492 }
2493
2494
2495 {
2499 return BinaryOperator::CreateOr(A, B);
2500 }
2501
2502
2503 {
2507 (Op0->hasOneUse() || Op1->hasOneUse()))
2509 }
2510
2511
2512 {
2516 return BinaryOperator::CreateAnd(A, B);
2517 }
2518
2519
2520 {
2524 (Op0->hasOneUse() || Op1->hasOneUse()))
2526 }
2527
2528 {
2530
2532 return BinaryOperator::CreateAnd(
2534 }
2535
2536 {
2537
2543 }
2544 }
2545
2546 {
2547
2552 }
2553 }
2554
2555 {
2556
2557
2562 (C->getType()->getScalarSizeInBits() == 1);
2563 };
2564 if (m_SubXorCmp(Op0, Op1))
2566 if (m_SubXorCmp(Op1, Op0))
2568 }
2569
2571 return R;
2572
2574 return R;
2575
2576 {
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
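  // Sink the subtraction into a select operand when the sub's other operand is
  // one arm of that select: the matching arm collapses to zero and the sub is
  // applied only to the remaining arm.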
2587 auto SinkSubIntoSelect =
2590 Value *Cond, *TrueVal, *FalseVal;
2593 return nullptr;
2594 if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)
2595 return nullptr;
2596
2597
2598
2599 bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
2600 Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);
2604 OtherHandOfSubIsTrueVal ? NewSub : Zero);
2605
2607 return NewSel;
2608 };
2609 if (Instruction *NewSel = SinkSubIntoSelect(
2610 Op0, Op1,
2613 Op1);
2614 }))
2615 return NewSel;
2616 if (Instruction *NewSel = SinkSubIntoSelect(
2617 Op1, Op0,
2620 OtherHandOfSelect);
2621 }))
2622 return NewSel;
2623 }
2624
2625
      (Op1->hasOneUse() || isa<Constant>(Y)))
2628 return BinaryOperator::CreateAnd(
2630
2631
2632
2633
2634
2635
2636
2637
2642 return BinaryOperator::CreateSub(Not, X);
2643 }
2646 !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
2648 return BinaryOperator::CreateSub(X, Not);
2649 }
2650
2651
2652
2653 Value *LHSOp, *RHSOp;
2657 I.hasNoUnsignedWrap()))
2659
2660
2664 false))
2666
    if (auto *GEP = dyn_cast<GEPOperator>(LHSOp)) {
2670 if (GEP->getPointerOperand() == RHSOp) {
2671 if (GEP->hasNoUnsignedWrap() || GEP->hasNoUnsignedSignedWrap()) {
2673 Value *Res = GEP->hasNoUnsignedWrap()
2676 GEP->hasNoUnsignedSignedWrap())
2679 }
2680 }
2681 }
2682 }
2683
2684
2685
2686
2687
2688
2690 const APInt *ShAmt;
2694 Op1->hasNUses(2) && *ShAmt == BitWidth - 1 &&
2696
2697
2698
2700
2701 Value *NegA = I.hasNoUnsignedWrap()
2705 }
2706
2707
2708
2709
2710
2711 const APInt *AddC, *AndC;
2716 if ((HighMask & *AndC).isZero())
2717 return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
2718 }
2719
2722 return V;
2723
2724
2729
2730
2731
2732
2736
2737
2741
2742
2746 }
2747
2748
2752 }
2753
2754
2759 {Builder.CreateNot(X)}));
2760
2761
2762
    auto *OBO0 = cast<OverflowingBinaryOperator>(Op0);
    auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2767 bool PropagateNSW = I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() &&
2768 OBO1->hasNoSignedWrap() && BitWidth > 2;
2769 bool PropagateNUW = I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() &&
2770 OBO1->hasNoUnsignedWrap() && BitWidth > 1;
2775 }
2776
2777
2780 if (I.hasNoUnsignedWrap() || I.hasNoSignedWrap()) {
2786 }
2787 }
2788
2790 return Res;
2791
2792 return TryToNarrowDeduceFlags();
2793}
2794
2795
// Eliminate floating-point negation in the fneg(X) or fsub(-0.0, X) form by
// folding the negation into a constant operand of X's defining instruction.
static Instruction *foldFNegIntoConstant(Instruction &I, const DataLayout &DL) {
2798
2799
2800
2803 return nullptr;
2804
2807
2808
2809
2813
2817
2821
2822
2823
2824
2829 return FDiv;
2830 }
2831
2832
2836
2837 return nullptr;
2838}
2839
2840Instruction *InstCombinerImpl::hoistFNegAboveFMulFDiv(Value *FNegOp,
                                                       Instruction &FMFSource) {
2845
2848 }
2849
2853 }
2854
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(FNegOp)) {
2856
2857 if (II->getIntrinsicID() == Intrinsic::ldexp) {
2861 {Builder.CreateFNegFMF(II->getArgOperand(0), FMF),
2862 II->getArgOperand(1)});
2863 New->setFastMathFlags(FMF);
2865 return New;
2866 }
2867 }
2868
2869 return nullptr;
2870}
2871
Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
  Value *Op = I.getOperand(0);
2874
2878
  if (Instruction *X = foldFNegIntoConstant(I, DL))
    return X;
2881
2883
2884
2885 if (I.hasNoSignedZeros() &&
2888
2891 return nullptr;
2892
2893 if (Instruction *R = hoistFNegAboveFMulFDiv(OneUse, I))
2895
2896
2899
2900
2901
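  // When an fneg is pushed through a select, be conservative with fast-math
  // flags on the new select: 'nsz' is only kept when the old select had it or
  // the negated value is common to both arms.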
2902 auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) {
    if (auto *OldSel = dyn_cast<SelectInst>(Op)) {
2905 FastMathFlags FMF = I.getFastMathFlags() | OldSel->getFastMathFlags();
2907 if (!OldSel->hasNoSignedZeros() && !CommonOperand &&
2910 }
2911 };
2912
2917 propagateSelectFMF(NewSel, P == Y);
2918 return NewSel;
2919 }
2920
2924 propagateSelectFMF(NewSel, P == X);
2925 return NewSel;
2926 }
2927
2928
2929
2934 propagateSelectFMF(NewSel, true);
2935 return NewSel;
2936 }
2937 }
2938
2939
2941
2942
2944 FMF &= cast(OneUse)->getFastMathFlags();
2948 }
2949
2950 return nullptr;
2951}
2952
Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
  if (Value *V = simplifyFSubInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
2960 return X;
2961
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;
2964
2965
2966
2967
2968
2969
2970
2971
2975
2977 return X;
2978
2979 if (Instruction *R = foldFBinOpOfIntCasts(I))
2980 return R;
2981
2984
2985 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2986
2987
2988
2989
2990
2991
2992 if (I.hasNoSignedZeros() ||
2997 }
2998 }
2999
3000
3001 if (I.hasNoSignedZeros() && !isa(Op0) &&
3005 }
3006
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
3010 return NV;
3011
3012
3013
3014
3018
3019
3022
3023
3024
3028
3029
3032
3033
3034
3035
3039 }
3040
3041
3046 }
3047
3048
3051
3052 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
3053
3056
3057
3058
3061
3062
3065 Instruction::FSub, C, ConstantFP::get(Ty, 1.0), DL))
3067 }
3068
3071 Instruction::FSub, ConstantFP::get(Ty, 1.0), C, DL))
3073 }
3074
3075
3076
3077
3084 }
3085
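  // A difference of two fadd reductions becomes one reduction over the
  // element-wise difference of the vectors, keeping the first start value A0.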
3086 auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {
    return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum),
3089 };
3090 Value *A0, *A1, *V0, *V1;
3091 if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
3093
3094
3097 {Sub->getType()}, {A0, Sub}, &I);
3099 }
3100
3102 return F;
3103
3104
3105
3106
3107
3110
3111
3115 }
3116 }
3117
3118 return nullptr;
3119}
static bool isConstant(const MachineInstr &MI)
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Instruction * factorizeFAddFSub(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction * foldAddToAshr(BinaryOperator &Add)
Try to reduce signed division by power-of-2 to an arithmetic shift right.
static bool MatchMul(Value *E, Value *&Op, APInt &C)
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned)
static Instruction * foldFNegIntoConstant(Instruction &I, const DataLayout &DL)
This eliminates floating-point negation in either 'fneg(X)' or 'fsub(-0.0, X)' form by combining into...
static Instruction * combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder, const BinaryOperator &I)
static Instruction * factorizeLerp(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Eliminate an op from a linear interpolation (lerp) pattern.
static Instruction * foldSubOfMinMax(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Instruction * foldBoxMultiply(BinaryOperator &I)
Reduce a sequence of masked half-width multiplies to a single multiply.
static Value * checkForNegativeOperand(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned)
static Instruction * foldNoWrapAdd(BinaryOperator &Add, InstCombiner::BuilderTy &Builder)
Wrapping flags may allow combining constants separated by an extend.
static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A, Value *&B)
static Instruction * factorizeMathWithShlOps(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
This is a specialization of a more general transform from foldUsingDistributiveLaws.
static Instruction * canonicalizeLowbitMask(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Fold (1 << NBits) - 1 Into: ~(-(1 << NBits)) Because a 'not' is better for bit-tracking analysis and ...
static Instruction * foldToUnsignedSaturatedAdd(BinaryOperator &I)
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isNegative() const
Determine sign of this APInt.
int32_t exactLogBase2() const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned logBase2() const
APInt smul_ov(const APInt &RHS, bool &Overflow) const
bool isMask(unsigned numBits) const
APInt sext(unsigned width) const
Sign extend to a new width.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
bool isZero() const
Return true if the value is positive or negative zero.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool isElementWiseEqual(Value *Y) const
Return true if this constant and a constant 'Y' are element-wise equal.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
This provides a helper for copying FMF from an instruction or setting specified flags.
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedWrap() const
GEPNoWrapFlags getNoWrapFlags() const
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Value * CreateFSubFMF(Value *L, Value *R, FMFSource FMFSource, const Twine &Name="", MDNode *FPMD=nullptr)
Value * CreateSRem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Value * CreateFPTrunc(Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr)
ConstantInt * getTrue()
Get the constant value for i1 true.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNeg(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg > -1.
Value * CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateNot(Value *V, const Twine &Name="")
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Value * CreateIsNeg(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg < 0.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateCopySign(Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create call to the copysign intrinsic.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateFAddFMF(Value *L, Value *R, FMFSource FMFSource, const Twine &Name="", MDNode *FPMD=nullptr)
Value * CreateFPExt(Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateFNegFMF(Value *V, FMFSource FMFSource, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFDivFMF(Value *L, Value *R, FMFSource FMFSource, const Twine &Name="", MDNode *FPMD=nullptr)
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFMulFMF(Value *L, Value *R, FMFSource FMFSource, const Twine &Name="", MDNode *FPMD=nullptr)
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * visitAdd(BinaryOperator &I)
Instruction * canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(BinaryOperator &I)
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * foldSquareSumInt(BinaryOperator &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Instruction * foldSquareSumFP(BinaryOperator &I)
Instruction * visitSub(BinaryOperator &I)
Value * OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty, bool isNUW)
Optimize pointer differences into the same array into a size.
Instruction * visitFAdd(BinaryOperator &I)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Instruction * foldAddLikeCommutative(Value *LHS, Value *RHS, bool NSW, bool NUW)
Common transforms for add / disjoint or.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Value * SimplifyAddWithRemainder(BinaryOperator &I)
Tries to simplify add operations using the definition of remainder.
Instruction * foldAddWithConstant(BinaryOperator &Add)
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
Instruction * visitFNeg(UnaryOperator &I)
Instruction * visitFSub(BinaryOperator &I)
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
static Constant * AddOne(Constant *C)
Add one to a Constant.
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction,...
void setHasNoSignedZeros(bool B)
Set or clear the no-signed-zeros flag on this instruction, which must be an operator which supports t...
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
void setHasNoInfs(bool B)
Set or clear the no-infs flag on this instruction, which must be an operator which supports this flag...
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
A wrapper class for inspecting calls to intrinsic functions.
static Value * Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC)
Attempt to negate Root.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
StringRef getName() const
Return a constant reference to the value's name.
This class represents zero extension of integer types.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
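These m_* helpers are composed and handed to match(); as a hedged illustration (not code from this file), the following tests whether a value has the shape "add X, C" for a constant integer and captures both pieces:

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/PatternMatch.h"
  #include "llvm/IR/Value.h"

  using namespace llvm;
  using namespace PatternMatch;

  // Hypothetical helper: returns the non-constant operand of "add X, C",
  // binding C to the matched constant; returns nullptr if V has another shape.
  static Value *matchAddOfConstant(Value *V, const APInt *&C) {
    Value *X;
    if (match(V, m_Add(m_Value(X), m_APInt(C))))
      return X;
    return nullptr;
  }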
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_or< CastInst_match< OpTy, SExtInst >, OpTy > m_SExtOrSelf(const OpTy &Op)
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
OneUse_match< T > m_OneUse(const T &SubPattern)
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FDiv > m_FDiv(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
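The commutative (m_c_*) and combining (m_OneUse, m_CombineOr, m_CombineAnd) variants let a single pattern cover several syntactic shapes. A hedged sketch, again not code from this file: match "~X + Y" in either operand order, insisting that the 'not' has a single use (since ~X = -X - 1, such an expression equals Y - X - 1):

  #include "llvm/IR/PatternMatch.h"
  #include "llvm/IR/Value.h"

  using namespace llvm;
  using namespace PatternMatch;

  // Hypothetical helper: true if V is "(~X) + Y" or "Y + (~X)" and the xor
  // producing ~X has exactly one use; X and Y are bound on success.
  static bool matchNotPlusValue(Value *V, Value *&X, Value *&Y) {
    return match(V, m_c_Add(m_OneUse(m_Not(m_Value(X))), m_Value(Y)));
  }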
bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
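This query backs the classic carry-free reasoning used for additions: if no bit position can be set in both operands, the add never produces a carry, so A + B computes the same value as A | B (an "or disjoint"). A minimal sketch of the check, with an illustrative helper name:

  #include "llvm/Analysis/ValueTracking.h"

  using namespace llvm;

  // Hypothetical helper: true if it is safe to treat "add A, B" as the
  // carry-free "or disjoint A, B".
  static bool addHasNoCarry(const Value *A, const Value *B,
                            const SimplifyQuery &SQ) {
    return haveNoCommonBitsSet(A, B, SQ);
  }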
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
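For instance, 'icmp slt X, 0' and 'icmp sgt X, -1' both test only the sign bit of X; TrueIfSigned reports whether the comparison is true exactly when the sign bit is set (true for the former, false for the latter). A hedged usage sketch, assuming the function is declared in ValueTracking.h as the signature above suggests:

  #include "llvm/ADT/APInt.h"
  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  // Hypothetical helper: decide whether "icmp Pred X, C" only inspects the
  // sign bit of X, and if so, whether a set sign bit makes it true.
  static bool cmpIsSignBitTest(ICmpInst::Predicate Pred, const APInt &C,
                               bool &TrueIfSigned) {
    return isSignBitCheck(Pred, C, TrueIfSigned);
  }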
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
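These InstSimplify entry points are usually tried before materialising a new instruction: they return an existing value or constant when the whole operation folds away, and null otherwise. A minimal sketch with an illustrative helper name:

  #include "llvm/Analysis/InstructionSimplify.h"

  using namespace llvm;

  // Hypothetical helper: try to fold "add nsw LHS, RHS" to something simpler;
  // returns nullptr when a real add is still required.
  static Value *tryFoldNSWAdd(Value *LHS, Value *RHS, const SimplifyQuery &Q) {
    return simplifyAddInst(LHS, RHS, /*IsNSW=*/true, /*IsNUW=*/false, Q);
  }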
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
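The floating-point variants additionally take the instruction's fast-math flags and, optionally, constrained-FP exception behaviour and rounding mode; leaving the last two at their defaults matches ordinary, non-constrained IR. A hedged sketch:

  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/IR/FMF.h"

  using namespace llvm;

  // Hypothetical helper: try to fold "fadd LHS, RHS" under the given
  // fast-math flags, using default exception behaviour and rounding.
  static Value *tryFoldFAdd(Value *LHS, Value *RHS, FastMathFlags FMF,
                            const SimplifyQuery &Q) {
    return simplifyFAddInst(LHS, RHS, FMF, Q);
  }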
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
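A typical use of the free function is to prove facts about individual bits before a rewrite; for example, a hedged sketch that checks whether a value is provably even (the helper name is illustrative):

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/Support/KnownBits.h"

  using namespace llvm;

  // Hypothetical helper: true if the low bit of V is known to be zero.
  static bool isKnownEven(const Value *V, const DataLayout &DL) {
    KnownBits Known = computeKnownBits(V, DL);
    return Known.Zero[0];
  }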
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
bool cannotBeNegativeZero(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if we can prove that the specified FP value is never equal to -0.0.
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A suitably aligned and sized character array member which can hold elements of any type.
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutDomCondCache() const