LLVM: lib/Transforms/InstCombine/InstCombineAddSub.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

33#include

34#include

35

36using namespace llvm;

38

39#define DEBUG_TYPE "instcombine"

40

41namespace {

42

43

44

45

46

47

48

49 class FAddendCoef {

50 public:

51

52

53

54

55

56 FAddendCoef() = default;

57 ~FAddendCoef();

58

59

60

61 void operator=(const FAddendCoef &A);

63 void operator*=(const FAddendCoef &S);

64

65 void set(short C) {

66 assert(!insaneIntVal(C) && "Insane coefficient");

67 IsFp = false; IntVal = C;

68 }

69

71

72 void negate();

73

74 bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }

76

77 bool isOne() const { return isInt() && IntVal == 1; }

78 bool isTwo() const { return isInt() && IntVal == 2; }

79 bool isMinusOne() const { return isInt() && IntVal == -1; }

80 bool isMinusTwo() const { return isInt() && IntVal == -2; }

81

82 private:

83 bool insaneIntVal(int V) { return V > 4 || V < -4; }

84

85 APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }

86

87 const APFloat *getFpValPtr() const {

88 return reinterpret_cast<const APFloat *>(&FpValBuf);

89 }

90

91 const APFloat &getFpVal() const {

92 assert(IsFp && BufHasFpVal && "Incorret state");

93 return *getFpValPtr();

94 }

95

97 assert(IsFp && BufHasFpVal && "Incorret state");

98 return *getFpValPtr();

99 }

100

101 bool isInt() const { return !IsFp; }

102

103

104

105 void convertToFpType(const fltSemantics &Sem);

106

107

108

109

111

112 bool IsFp = false;

113

114

115 bool BufHasFpVal = false;

116

117

118

119

120

121 short IntVal = 0;

122

124 };

125

126

127

128

129 class FAddend {

130 public:

131 FAddend() = default;

132

134 assert((Val == T.Val) && "Symbolic-values disagree");

135 Coeff += T.Coeff;

136 }

137

138 Value *getSymVal() const { return Val; }

139 const FAddendCoef &getCoef() const { return Coeff; }

140

141 bool isConstant() const { return Val == nullptr; }

142 bool isZero() const { return Coeff.isZero(); }

143

144 void set(short Coefficient, Value *V) {

145 Coeff.set(Coefficient);

146 Val = V;

147 }

148 void set(const APFloat &Coefficient, Value *V) {

149 Coeff.set(Coefficient);

150 Val = V;

151 }

153 Coeff.set(Coefficient->getValueAPF());

154 Val = V;

155 }

156

157 void negate() { Coeff.negate(); }

158

159

160

161 static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

162

163

164

165 unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

166

167 private:

168 void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

169

170

171 Value *Val = nullptr;

172 FAddendCoef Coeff;

173 };

174

175

176

177

178 class FAddCombine {

179 public:

181

183

184 private:

186

187 Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

188

189

190 Value *createAddendVal(const FAddend &A, bool& NeedNeg);

191

192

193 unsigned calcInstrNumber(const AddendVect& Vect);

194

199 Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);

200 void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

201

202

203 #ifndef NDEBUG

204 unsigned CreateInstrNum;

205 void initCreateInstNum() { CreateInstrNum = 0; }

206 void incCreateInstNum() { CreateInstrNum++; }

207 #else

208 void initCreateInstNum() {}

209 void incCreateInstNum() {}

210 #endif

211

214 };

215

216}

217

218

219

220

221

222

223

224FAddendCoef::~FAddendCoef() {

225 if (BufHasFpVal)

226 getFpValPtr()->~APFloat();

227}

228

229void FAddendCoef::set(const APFloat& C) {

231

233

234

236 } else

237 *P = C;

238

239 IsFp = BufHasFpVal = true;

240}

241

242void FAddendCoef::convertToFpType(const fltSemantics &Sem) {

244 return;

245

247 if (IntVal > 0)

249 else {

250 new(P) APFloat(Sem, 0 - IntVal);

251 P->changeSign();

252 }

253 IsFp = BufHasFpVal = true;

254}

255

256APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {

257 if (Val >= 0)

258 return APFloat(Sem, Val);

259

261 T.changeSign();

262

263 return T;

264}

265

266void FAddendCoef::operator=(const FAddendCoef &That) {

267 if (That.isInt())

268 set(That.IntVal);

269 else

270 set(That.getFpVal());

271}

272

273void FAddendCoef::operator+=(const FAddendCoef &That) {

274 RoundingMode RndMode = RoundingMode::NearestTiesToEven;

275 if (isInt() == That.isInt()) {

277 IntVal += That.IntVal;

278 else

279 getFpVal().add(That.getFpVal(), RndMode);

280 return;

281 }

282

284 const APFloat &T = That.getFpVal();

285 convertToFpType(T.getSemantics());

286 getFpVal().add(T, RndMode);

287 return;

288 }

289

291 T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);

292}

293

294void FAddendCoef::operator*=(const FAddendCoef &That) {

295 if (That.isOne())

296 return;

297

298 if (That.isMinusOne()) {

299 negate();

300 return;

301 }

302

303 if (isInt() && That.isInt()) {

304 int Res = IntVal * (int)That.IntVal;

305 assert(!insaneIntVal(Res) && "Insane int value");

306 IntVal = Res;

307 return;

308 }

309

310 const fltSemantics &Semantic =

311 isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

312

314 convertToFpType(Semantic);

315 APFloat &F0 = getFpVal();

316

317 if (That.isInt())

318 F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),

319 APFloat::rmNearestTiesToEven);

320 else

321 F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);

322}

323

324void FAddendCoef::negate() {

326 IntVal = 0 - IntVal;

327 else

328 getFpVal().changeSign();

329}

330

331Value *FAddendCoef::getValue(Type *Ty) const {

333 ConstantFP::get(Ty, float(IntVal)) :

335}

336

337

338

339

340

341

342

343

344

345

346

347unsigned FAddend::drillValueDownOneStep

348 (Value *Val, FAddend &Addend0, FAddend &Addend1) {

351 return 0;

352

353 unsigned Opcode = I->getOpcode();

354

355 if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {

357 Value *Opnd0 = I->getOperand(0);

358 Value *Opnd1 = I->getOperand(1);

360 Opnd0 = nullptr;

361

363 Opnd1 = nullptr;

364

365 if (Opnd0) {

366 if (!C0)

367 Addend0.set(1, Opnd0);

368 else

369 Addend0.set(C0, nullptr);

370 }

371

372 if (Opnd1) {

373 FAddend &Addend = Opnd0 ? Addend1 : Addend0;

374 if (!C1)

375 Addend.set(1, Opnd1);

376 else

377 Addend.set(C1, nullptr);

378 if (Opcode == Instruction::FSub)

379 Addend.negate();

380 }

381

382 if (Opnd0 || Opnd1)

383 return Opnd0 && Opnd1 ? 2 : 1;

384

385

387 return 1;

388 }

389

390 if (I->getOpcode() == Instruction::FMul) {

391 Value *V0 = I->getOperand(0);

392 Value *V1 = I->getOperand(1);

394 Addend0.set(C, V1);

395 return 1;

396 }

397

399 Addend0.set(C, V0);

400 return 1;

401 }

402 }

403

404 return 0;

405}

406

407

408

409

410unsigned FAddend::drillAddendDownOneStep

411 (FAddend &Addend0, FAddend &Addend1) const {

413 return 0;

414

415 unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);

416 if (!BreakNum || Coeff.isOne())

417 return BreakNum;

418

419 Addend0.Scale(Coeff);

420

421 if (BreakNum == 2)

422 Addend1.Scale(Coeff);

423

424 return BreakNum;

425}

426

427Value *FAddCombine::simplify(Instruction *I) {

428 assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&

429 "Expected 'reassoc'+'nsz' instruction");

430

431

432 if (I->getType()->isVectorTy())

433 return nullptr;

434

435 assert((I->getOpcode() == Instruction::FAdd ||

436 I->getOpcode() == Instruction::FSub) && "Expect add/sub");

437

438

439 Instr = I;

440

441 FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

442

443 unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

444

445

446 unsigned Opnd0_ExpNum = 0;

447 unsigned Opnd1_ExpNum = 0;

448

449 if (!Opnd0.isConstant())

450 Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

451

452

453 if (OpndNum == 2 && !Opnd1.isConstant())

454 Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

455

456

457 if (Opnd0_ExpNum && Opnd1_ExpNum) {

458 AddendVect AllOpnds;

460 AllOpnds.push_back(&Opnd1_0);

461 if (Opnd0_ExpNum == 2)

462 AllOpnds.push_back(&Opnd0_1);

463 if (Opnd1_ExpNum == 2)

464 AllOpnds.push_back(&Opnd1_1);

465

466

467 unsigned InstQuota = 0;

468

469 Value *V0 = I->getOperand(0);

470 Value *V1 = I->getOperand(1);

473

474 if (Value *R = simplifyFAdd(AllOpnds, InstQuota))

475 return R;

476 }

477

478 if (OpndNum != 2) {

479

480

481

482

483 const FAddendCoef &CE = Opnd0.getCoef();

484 return CE.isOne() ? Opnd0.getSymVal() : nullptr;

485 }

486

487

488 if (Opnd1_ExpNum) {

489 AddendVect AllOpnds;

491 AllOpnds.push_back(&Opnd1_0);

492 if (Opnd1_ExpNum == 2)

493 AllOpnds.push_back(&Opnd1_1);

494

495 if (Value *R = simplifyFAdd(AllOpnds, 1))

496 return R;

497 }

498

499

500 if (Opnd0_ExpNum) {

501 AddendVect AllOpnds;

503 AllOpnds.push_back(&Opnd0_0);

504 if (Opnd0_ExpNum == 2)

505 AllOpnds.push_back(&Opnd0_1);

506

507 if (Value *R = simplifyFAdd(AllOpnds, 1))

508 return R;

509 }

510

511 return nullptr;

512}

513

514Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {

515 unsigned AddendNum = Addends.size();

516 assert(AddendNum <= 4 && "Too many addends");

517

518

519 unsigned NextTmpIdx = 0;

520 FAddend TmpResult[3];

521

522

523 AddendVect SimpVect;

524

525

526

527

528 for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

529

530 const FAddend *ThisAddend = Addends[SymIdx];

531 if (!ThisAddend) {

532

533 continue;

534 }

535

536 Value *Val = ThisAddend->getSymVal();

537

538

539

540

541

542

543

544

545 unsigned StartIdx = SimpVect.size();

546 SimpVect.push_back(ThisAddend);

547

548

549

550

551

552

553 for (unsigned SameSymIdx = SymIdx + 1;

554 SameSymIdx < AddendNum; SameSymIdx++) {

555 const FAddend *T = Addends[SameSymIdx];

556 if (T && T->getSymVal() == Val) {

557

558

559 Addends[SameSymIdx] = nullptr;

560 SimpVect.push_back(T);

561 }

562 }

563

564

565 if (StartIdx + 1 != SimpVect.size()) {

566 FAddend &R = TmpResult[NextTmpIdx ++];

567 R = *SimpVect[StartIdx];

568 for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)

569 R += *SimpVect[Idx];

570

571

572 SimpVect.resize(StartIdx);

573 if (R.isZero()) {

574 SimpVect.push_back(&R);

575 }

576 }

577 }

578

579 assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access");

580

582 if (!SimpVect.empty())

583 Result = createNaryFAdd(SimpVect, InstrQuota);

584 else {

585

587 }

588

590}

591

592Value *FAddCombine::createNaryFAdd

593 (const AddendVect &Opnds, unsigned InstrQuota) {

594 assert(!Opnds.empty() && "Expect at least one addend");

595

596

597

598 unsigned InstrNeeded = calcInstrNumber(Opnds);

599 if (InstrNeeded > InstrQuota)

600 return nullptr;

601

602 initCreateInstNum();

603

604

605

606

607

608

609

610

611

612 Value *LastVal = nullptr;

613 bool LastValNeedNeg = false;

614

615

616 for (const FAddend *Opnd : Opnds) {

617 bool NeedNeg;

618 Value *V = createAddendVal(*Opnd, NeedNeg);

619 if (!LastVal) {

620 LastVal = V;

621 LastValNeedNeg = NeedNeg;

622 continue;

623 }

624

625 if (LastValNeedNeg == NeedNeg) {

626 LastVal = createFAdd(LastVal, V);

627 continue;

628 }

629

630 if (LastValNeedNeg)

631 LastVal = createFSub(V, LastVal);

632 else

633 LastVal = createFSub(LastVal, V);

634

635 LastValNeedNeg = false;

636 }

637

638 if (LastValNeedNeg) {

639 LastVal = createFNeg(LastVal);

640 }

641

642#ifndef NDEBUG

643 assert(CreateInstrNum == InstrNeeded &&

644 "Inconsistent in instruction numbers");

645#endif

646

647 return LastVal;

648}

649

650Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {

651 Value *V = Builder.CreateFSub(Opnd0, Opnd1);

653 createInstPostProc(I);

654 return V;

655}

656

657Value *FAddCombine::createFNeg(Value *V) {

658 Value *NewV = Builder.CreateFNeg(V);

660 createInstPostProc(I, true);

661 return NewV;

662}

663

664Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {

665 Value *V = Builder.CreateFAdd(Opnd0, Opnd1);

667 createInstPostProc(I);

668 return V;

669}

670

671Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {

672 Value *V = Builder.CreateFMul(Opnd0, Opnd1);

674 createInstPostProc(I);

675 return V;

676}

677

678void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {

680

681

682 if (!NoNumber)

683 incCreateInstNum();

684

685

687}

688

689

690

691unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {

692 unsigned OpndNum = Opnds.size();

693 unsigned InstrNeeded = OpndNum - 1;

694

695

696 for (const FAddend *Opnd : Opnds) {

697 if (Opnd->isConstant())

698 continue;

699

700

701

703 continue;

704

705 const FAddendCoef &CE = Opnd->getCoef();

706

707

708

709 if (CE.isMinusOne() && CE.isOne())

710 InstrNeeded++;

711 }

712 return InstrNeeded;

713}

714

715

716

717

718

719

720

721

722

723Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {

724 const FAddendCoef &Coeff = Opnd.getCoef();

725

726 if (Opnd.isConstant()) {

727 NeedNeg = false;

728 return Coeff.getValue(Instr->getType());

729 }

730

731 Value *OpndVal = Opnd.getSymVal();

732

733 if (Coeff.isMinusOne() || Coeff.isOne()) {

734 NeedNeg = Coeff.isMinusOne();

735 return OpndVal;

736 }

737

738 if (Coeff.isTwo() || Coeff.isMinusTwo()) {

739 NeedNeg = Coeff.isMinusTwo();

740 return createFAdd(OpndVal, OpndVal);

741 }

742

743 NeedNeg = false;

744 return createFMul(OpndVal, Coeff.getValue(Instr->getType()));

745}

746

747

748

749

750

751

754 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

755

756

757

758 if (LHS->hasOneUse() && RHS->hasOneUse())

759 return nullptr;

760

761 Value *X = nullptr, *Y = nullptr, *Z = nullptr;

762 const APInt *C1 = nullptr, *C2 = nullptr;

763

764

767

769

772

774

775

777 Value *NewAnd = Builder.CreateAnd(Z, *C1);

778 return Builder.CreateSub(RHS, NewAnd, "sub");

780

781

782 Value *NewOr = Builder.CreateOr(Z, ~(*C1));

783 return Builder.CreateSub(RHS, NewOr, "sub");

784 }

785 }

786 }

787

788

789 LHS = I.getOperand(0);

790 RHS = I.getOperand(1);

791

792

795

796

797

798

802 Value *NewOr = Builder.CreateOr(Z, ~(*C2));

803 return Builder.CreateSub(RHS, NewOr, "sub");

804 }

805 return nullptr;

806}

807

808

811 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);

812 Type *Ty = Add.getType();

815 return nullptr;

816

817

818

820 const APInt *C1, *C2;

825

828

831 Builder.CreateNUWAdd(X, ConstantInt::get(X->getType(), NewC)), Ty);

832 }

833

834

835

836

840 Value *WideC = Builder.CreateSExt(NarrowC, Ty);

841 Value *NewC = Builder.CreateAdd(WideC, Op1C);

842 Value *WideX = Builder.CreateSExt(X, Ty);

843 return BinaryOperator::CreateAdd(WideX, NewC);

844 }

845

848 Value *WideC = Builder.CreateZExt(NarrowC, Ty);

849 Value *NewC = Builder.CreateAdd(WideC, Op1C);

850 Value *WideX = Builder.CreateZExt(X, Ty);

851 return BinaryOperator::CreateAdd(WideX, NewC);

852 }

853 return nullptr;

854}

855

857 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);

858 Type *Ty = Add.getType();

861 return nullptr;

862

864 return NV;

865

868

869

872

874

875

878 return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X);

879

880

882 X->getType()->getScalarSizeInBits() == 1)

884 Op1);

885

887 X->getType()->getScalarSizeInBits() == 1)

889 Op1);

890

891

893

894 auto *COne = ConstantInt::get(Op1C->getType(), 1);

895 bool WillNotSOV = willNotOverflowSignedSub(Op1C, COne, Add);

899 return Res;

900 }

901

902

904 unsigned BitWidth = Ty->getScalarSizeInBits();

908 return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);

909

911 return nullptr;

912

913

919 willNotOverflowSignedAdd(Op01C, Op1C, Add));

921 return NewAdd;

922 }

923

924

927 return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2));

928

929 if (C->isSignMask()) {

930

931

932 if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())

933 return BinaryOperator::CreateOr(Op0, Op1);

934

935

936

937 return BinaryOperator::CreateXor(Op0, Op1);

938 }

939

940

941

945

947

949 return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C2 ^ *C));

950

951

952

955 if ((*C2 | LHSKnown.Zero).isAllOnes())

956 return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);

957 }

958

959

960

961

962

963 if (Op0->hasOneUse() && *C2 == -(*C)) {

964 unsigned BitWidth = Ty->getScalarSizeInBits();

965 unsigned ShAmt = 0;

966 if (C->isPowerOf2())

967 ShAmt = BitWidth - C->logBase2() - 1;

970 if (ShAmt &&

972 Constant *ShAmtC = ConstantInt::get(Ty, ShAmt);

973 Value *NewShl = Builder.CreateShl(X, ShAmtC, "sext");

974 return BinaryOperator::CreateAShr(NewShl, ShAmtC);

975 }

976 }

977 }

978

979 if (C->isOne() && Op0->hasOneUse()) {

980

981

982

983

984

986 X->getType()->getScalarSizeInBits() == 1)

988

989

990

993 C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {

995 return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));

996 }

997 }

998

999

1003 Intrinsic::usub_sat, X, ConstantInt::get(Add.getType(), -*C)));

1004

1005

1006

1007 if (C->isOne()) {

1012 }

1013 }

1014

1015 return nullptr;

1016}

1017

1018

1019

1020

1021

1022

1023

1024

1025

1026template <bool FP, typename Mul2Rhs>

1029 constexpr unsigned MulOp = FP ? Instruction::FMul : Instruction::Mul;

1030 constexpr unsigned AddOp = FP ? Instruction::FAdd : Instruction::Add;

1031 constexpr unsigned Mul2Op = FP ? Instruction::FMul : Instruction::Shl;

1032

1033

1037 MulOp,

1041 return true;

1042

1043

1044

1045

1048 AddOp,

1057}

1058

1059

1064 return BinaryOperator::CreateMul(AB, AB);

1065 }

1066 return nullptr;

1067}

1068

1069

1070

1072 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && "Assumption mismatch");

1077 }

1078 return nullptr;

1079}

1080

1081

1082

1083

1085 const APInt *AI;

1087 C = *AI;

1088 return true;

1089 }

1092 C <<= *AI;

1093 return true;

1094 }

1095 return false;

1096}

1097

1098

1099

1100

1101

1103 const APInt *AI;

1104 IsSigned = false;

1106 IsSigned = true;

1107 C = *AI;

1108 return true;

1109 }

1111 C = *AI;

1112 return true;

1113 }

1115 C = *AI + 1;

1116 return true;

1117 }

1118 return false;

1119}

1120

1121

1122

1123

1125 const APInt *AI;

1127 C = *AI;

1128 return true;

1129 }

1130 if (!IsSigned) {

1132 C = *AI;

1133 return true;

1134 }

1137 C <<= *AI;

1138 return true;

1139 }

1140 }

1141 return false;

1142}

1143

1144

1146 bool overflow;

1147 if (IsSigned)

1148 (void)C0.smul_ov(C1, overflow);

1149 else

1150 (void)C0.umul_ov(C1, overflow);

1151 return overflow;

1152}

1153

1154

1155

1156

1157

1159 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

1161 APInt C0, MulOpC;

1162 bool IsSigned;

1163

1164 if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||

1165 (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&

1166 C0 == MulOpC) {

1169 bool Rem2IsSigned;

1170

1171 if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&

1172 IsSigned == Rem2IsSigned) {

1175

1176 if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&

1178 Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1);

1179 return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")

1180 : Builder.CreateURem(X, NewDivisor, "urem");

1181 }

1182 }

1183 }

1184

1185

1186 Value *Div, *Rem;

1188 if (!LHS->hasOneUse() || MatchMul(LHS, Div, C1))

1189 Div = LHS, C1 = APInt(I.getType()->getScalarSizeInBits(), 1);

1190 if (!RHS->hasOneUse() || MatchMul(RHS, Rem, C2))

1191 Rem = RHS, C2 = APInt(I.getType()->getScalarSizeInBits(), 1);

1195 }

1198 if (MatchRem(Rem, X, C0, IsSigned) &&

1199 MatchDiv(Div, DivOpV, DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC &&

1200

1201 !(C1.isOne() && !IsSigned && DivOpC.isPowerOf2() && DivOpC != 2)) {

1202 APInt NewC = C1 - C2 * C0;

1204 return nullptr;

1206 return nullptr;

1207 Value *MulXC2 = Builder.CreateMul(X, ConstantInt::get(X->getType(), C2));

1209 return MulXC2;

1210 return Builder.CreateAdd(

1211 Builder.CreateMul(Div, ConstantInt::get(X->getType(), NewC)), MulXC2);

1212 }

1213

1214 return nullptr;

1215}

1216

1217

1218

1219

1220

1221

1222

1227 return nullptr;

1228

1230 Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");

1231

1233

1234 BOp->setHasNoSignedWrap();

1235 BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());

1236 }

1237

1239}

1240

1242 assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");

1243 Type *Ty = I.getType();

1244 auto getUAddSat = [&]() {

1246 Ty);

1247 };

1248

1249

1254

1255

1256 const APInt *C, *NotC;

1258 *C == ~*NotC)

1259 return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });

1260

1261 return nullptr;

1262}

1263

1264

1265

1266

1273 Value *NewShl = Builder.CreateShl(B, Cnt);

1274 return BinaryOperator::CreateSub(A, NewShl);

1275 }

1276 return nullptr;

1277}

1278

1279

1281

1283 const APInt *DivC;

1286 return nullptr;

1287

1288

1289

1290

1291

1292

1293

1294

1295 const APInt *MaskC, *MaskCCmp;

1297 if (match(Add.getOperand(1),

1300 return nullptr;

1301

1304 return nullptr;

1305

1308 ? (*MaskC == (SMin | (*DivC - 1)))

1309 : (*DivC == 2 && *MaskC == SMin + 1);

1310 if (!IsMaskValid)

1311 return nullptr;

1312

1313

1314 return BinaryOperator::CreateAShr(

1316}

1317

1319 bool NSW, bool NUW) {

1323 Instruction *R = BinaryOperator::CreateSub(C, B);

1326

1329 R->setHasNoSignedWrap(NSWOut);

1330 R->setHasNoUnsignedWrap(NUWOut);

1331 return R;

1332 }

1333

1334

1335 const APInt *C1, *C2;

1338 APInt MinusC1 = -(*C1);

1339 if (MinusC1 == (One << *C2)) {

1340 Constant *NewRHS = ConstantInt::get(RHS->getType(), MinusC1);

1341 return BinaryOperator::CreateSRem(RHS, NewRHS);

1342 }

1343 }

1344

1345 return nullptr;

1346}

1347

1351 assert((I.getOpcode() == Instruction::Add ||

1352 I.getOpcode() == Instruction::Or ||

1353 I.getOpcode() == Instruction::Sub) &&

1354 "Expecting add/or/sub instruction");

1355

1356

1357

1364 return nullptr;

1365

1366

1367 if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)

1368 return nullptr;

1369

1370 Type *XTy = X->getType();

1371 bool HadTrunc = I.getType() != XTy;

1372

1373

1374

1376 return nullptr;

1377

1378

1379

1380

1381

1383 if (match(LowBitsToSkip,

1386 return nullptr;

1387

1388

1389

1390 auto SkipExtInMagic = [&I](Value *&V) {

1391 if (I.getOpcode() == Instruction::Sub)

1393 else

1395 };

1396

1397

1398

1399 SkipExtInMagic(Select);

1400

1402 const APInt *Thr;

1403 Value *SignExtendingValue, *Zero;

1404 bool ShouldSignext;

1405

1406

1407

1411 return nullptr;

1412

1413

1414 if (!ShouldSignext)

1415 std::swap(SignExtendingValue, Zero);

1416

1417

1419 return nullptr;

1420

1421

1422

1423 SkipExtInMagic(SignExtendingValue);

1424 Constant *SignExtendingValueBaseConstant;

1425 if (match(SignExtendingValue,

1428 return nullptr;

1429

1430 if (I.getOpcode() == Instruction::Sub

1431 ? match(SignExtendingValueBaseConstant, m_One())

1432 : match(SignExtendingValueBaseConstant, m_AllOnes()))

1433 return nullptr;

1434

1435 auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,

1436 Extract->getName() + ".sext");

1437 NewAShr->copyIRFlags(Extract);

1438 if (!HadTrunc)

1439 return NewAShr;

1440

1441 Builder.Insert(NewAShr);

1443}

1444

1445

1446

1447

1450

1451 assert((I.getOpcode() == Instruction::Add ||

1452 I.getOpcode() == Instruction::Sub) &&

1453 "Expected add/sub");

1456 if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))

1457 return nullptr;

1458

1462 return nullptr;

1463

1464

1465 bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&

1466 Op1->hasNoSignedWrap();

1467 bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&

1468 Op1->hasNoUnsignedWrap();

1469

1470

1471 Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y);

1473 NewI->setHasNoSignedWrap(HasNSW);

1474 NewI->setHasNoUnsignedWrap(HasNUW);

1475 }

1476 auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);

1477 NewShl->setHasNoSignedWrap(HasNSW);

1478 NewShl->setHasNoUnsignedWrap(HasNUW);

1479 return NewShl;

1480}

1481

1482

1483

1485 unsigned BitWidth = I.getType()->getScalarSizeInBits();

1486

1488 return nullptr;

1489

1490 unsigned HalfBits = BitWidth >> 1;

1492

1493

1494 Value *XLo, *YLo;

1495 Value *CrossSum;

1496

1497

1500 return nullptr;

1501

1502

1503

1504

1505

1509 return nullptr;

1510

1511

1512

1513 if (match(CrossSum,

1518 return BinaryOperator::CreateMul(X, Y);

1519

1520 return nullptr;

1521}

1522

1525 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),

1526 SQ.getWithInstruction(&I)))

1528

1530 return &I;

1531

1533 return X;

1534

1536 return Phi;

1537

1538

1541

1543 return R;

1544

1546 return R;

1547

1549 return X;

1550

1552 return X;

1553

1555 return R;

1556

1558 return R;

1559

1560 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

1562 I.hasNoUnsignedWrap()))

1563 return R;

1565 I.hasNoUnsignedWrap()))

1566 return R;

1567 Type *Ty = I.getType();

1568 if (Ty->isIntOrIntVectorTy(1))

1569 return BinaryOperator::CreateXor(LHS, RHS);

1570

1571

1572 if (LHS == RHS) {

1573 auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));

1574 Shl->setHasNoSignedWrap(I.hasNoSignedWrap());

1575 Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());

1576 return Shl;

1577 }

1578

1581

1584

1585

1586 auto *Sub = BinaryOperator::CreateSub(RHS, A);

1588 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OB0->hasNoSignedWrap());

1589

1590 return Sub;

1591 }

1592

1593

1595 auto *Sub = BinaryOperator::CreateSub(LHS, B);

1597 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO->hasNoSignedWrap());

1598 return Sub;

1599 }

1600

1603

1604

1605

1606

1607

1610 return BinaryOperator::CreateSub(A, B);

1611

1612

1614 return BinaryOperator::CreateAdd(A, Builder.CreateShl(RHS, 1, "reass.add"));

1615

1616

1618 return BinaryOperator::CreateAdd(A, Builder.CreateShl(LHS, 1, "reass.add"));

1619

1620 {

1621

1625 (LHS->hasOneUse() || RHS->hasOneUse())) {

1628 }

1629

1630

1631

1635 return BinaryOperator::CreateAdd(Sub, C1);

1636 }

1637 }

1638

1639

1641

1642 const APInt *C1;

1643

1646 Constant *NewMask = ConstantInt::get(RHS->getType(), *C1 - 1);

1647 return BinaryOperator::CreateAnd(A, NewMask);

1648 }

1649

1650

1655 return new ZExtInst(B, LHS->getType());

1656

1657

1659 A->getType()->isIntOrIntVectorTy(1))

1661

1662

1667 A->getType()->isIntOrIntVectorTy()) {

1671 }

1672

1677 Ty,

1679 {A, B}));

1680 }

1681

1682

1685 return BinaryOperator::CreateDisjointOr(LHS, RHS);

1686

1687 if (Instruction *Ext = narrowMathIfNoOverflow(I))

1688 return Ext;

1689

1690

1691

1694 return BinaryOperator::CreateOr(A, B);

1695

1696

1697

1700

1703 return &I;

1704 }

1705

1706

1707

1708

1709

1714 I.hasNoUnsignedWrap(), I.hasNoSignedWrap());

1715 return BinaryOperator::CreateAnd(Add, A);

1716 }

1717

1718

1719

1726 return BinaryOperator::CreateAnd(Dec, Not);

1727 }

1728

1729

1730

1731

1732

1733

1737 Type *Ty = I.getType();

1738 Constant *NewMulC = ConstantInt::get(Ty, 1 - *C1);

1741 }

1742

1743

1744 const APInt *NegPow2C;

1749 return BinaryOperator::CreateSub(B, Shl);

1750 }

1751

1752

1753

1758 Value *NotZero = Builder.CreateIsNotNull(A, "isnotnull");

1759 Value *Zext = Builder.CreateZExt(NotZero, Ty, "isnotnull.zext");

1760 return BinaryOperator::CreateOr(LHS, Zext);

1761 }

1762

1763 {

1766

1767

1768 auto CondMatcher =

1771

1778 }

1779 }

1780

1781

1785 Value *OneConst = ConstantInt::get(A->getType(), 1);

1786 Value *UMax = Builder.CreateBinaryIntrinsic(Intrinsic::umax, A, OneConst);

1788 }

1789

1791 return Ashr;

1792

1793

1794

1795

1796 {

1798 const APInt *ShiftAmt, *Mask;

1800

1801

1802

1808 Mask->popcount() == *ShiftAmt) {

1809

1810

1811 Constant *MaskC = ConstantInt::get(X->getType(), *Mask);

1812 if (willNotOverflowUnsignedAdd(X, MaskC, I)) {

1813

1815 return BinaryOperator::CreateLShr(

1816 Add, ConstantInt::get(X->getType(), *ShiftAmt));

1817 }

1818 }

1819 }

1820

1821

1822 {

1823

1824

1825 bool ConsumesLHS, ConsumesRHS;

1826 if (isFreeToInvert(LHS, LHS->hasOneUse(), ConsumesLHS) && ConsumesLHS &&

1827 isFreeToInvert(RHS, RHS->hasOneUse(), ConsumesRHS) && ConsumesRHS) {

1830 assert(NotLHS != nullptr && NotRHS != nullptr &&

1831 "isFreeToInvert desynced with getFreelyInverted");

1832 Value *LHSPlusRHS = Builder.CreateAdd(NotLHS, NotRHS);

1833 return BinaryOperator::CreateSub(

1835 }

1836 }

1837

1839 return R;

1840

1841

1842

1843

1845 if (I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHSCache, RHSCache, I)) {

1847 I.setHasNoSignedWrap(true);

1848 }

1849 if (I.hasNoUnsignedWrap() &&

1850 willNotOverflowUnsignedAdd(LHSCache, RHSCache, I)) {

1852 I.setHasNoUnsignedWrap(true);

1853 }

1854

1856 return V;

1857

1860 return V;

1861

1863 return SatAdd;

1864

1865

1870 Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B}));

1871 }

1872

1873

1878 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},

1879 {Builder.CreateOr(A, B)}));

1880

1881

1882

1883

1884

1885 const APInt *XorC;

1896 *XorC == A->getType()->getScalarSizeInBits() - 1) {

1898 Value *Ctlz = Builder.CreateIntrinsic(Intrinsic::ctlz, {A->getType()},

1901 ConstantInt::get(A->getType(), A->getType()->getScalarSizeInBits()),

1902 Ctlz, "", true, true);

1904 }

1905

1907 return Res;

1908

1909 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))

1910 return Res;

1911

1913 return Res;

1914

1915

1916

1919 Value *Start, *Step;

1922 }

1923

1924 return Changed ? &I : nullptr;

1925}

1926

1927

1935 return nullptr;

1936

1937

1938 Value *XY = Builder.CreateFSubFMF(X, Y, &I);

1939 Value *MulZ = Builder.CreateFMulFMF(Z, XY, &I);

1941}

1942

1943

1946 assert((I.getOpcode() == Instruction::FAdd ||

1947 I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");

1948 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&

1949 "FP factorization requires FMF");

1950

1952 return Lerp;

1953

1954 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

1955 if (!Op0->hasOneUse() || !Op1->hasOneUse())

1956 return nullptr;

1957

1959 bool IsFMul;

1964 IsFMul = true;

1967 IsFMul = false;

1968 else

1969 return nullptr;

1970

1971

1972

1973

1974

1975 bool IsFAdd = I.getOpcode() == Instruction::FAdd;

1976 Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)

1977 : Builder.CreateFSubFMF(X, Y, &I);

1978

1979

1980

1983 return nullptr;

1984

1987}

1988

1991 I.getFastMathFlags(),

1992 SQ.getWithInstruction(&I)))

1994

1996 return &I;

1997

1999 return X;

2000

2002 return Phi;

2003

2005 return FoldedFAdd;

2006

2007

2008

2009

2010

2011

2016

2017

2021

2022

2023

2029 }

2030

2031

2038 }

2039

2040

2041

2042 if (Instruction *R = foldFBinOpOfIntCasts(I))

2043 return R;

2044

2045 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

2046

2049

2050 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {

2052 return F;

2053

2055 return F;

2056

2057

2061

2063 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,

2064 {X->getType()}, {Y, X}, &I));

2065 }

2070

2071 Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC);

2073 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,

2074 {X->getType()}, {NewStartC, X}, &I));

2075 }

2076

2077

2082 Instruction::FAdd, MulC, ConstantFP::get(I.getType(), 1.0), DL))

2084 }

2085

2086

2090

2093 }

2094

2095

2101

2102

2103

2104 if (!Result->hasNoNaNs())

2105 Result->setHasNoInfs(false);

2106 return Result;

2107 }

2108

2109 return nullptr;

2110}

2111

2114

2115 if (LHS->getType() != RHS->getType())

2116 return Base;

2117

2118

2121 while (true) {

2124 Ptr = GEP->getPointerOperand();

2125 else

2126 break;

2127 }

2128

2129

2130 bool First = true;

2131 while (true) {

2133 Base.Ptr = RHS;

2134 break;

2135 }

2136

2138 Base.RHSGEPs.push_back(GEP);

2141 Base.RHSNW = GEP->getNoWrapFlags();

2142 } else {

2143 Base.RHSNW = Base.RHSNW.intersectForOffsetAdd(GEP->getNoWrapFlags());

2144 }

2145 RHS = GEP->getPointerOperand();

2146 } else {

2147

2148 return Base;

2149 }

2150 }

2151

2152

2154 while (true) {

2155 if (LHS == Base.Ptr)

2156 break;

2157

2159 Base.LHSGEPs.push_back(GEP);

2162 Base.LHSNW = GEP->getNoWrapFlags();

2163 } else {

2164 Base.LHSNW = Base.LHSNW.intersectForOffsetAdd(GEP->getNoWrapFlags());

2165 }

2166 LHS = GEP->getPointerOperand();

2167 }

2168

2169 return Base;

2170}

2171

2173 unsigned NumGEPs = 0;

2175 bool SeenMultiUse = false;

2177

2178

2179

2180 if (GEP->hasOneUse()) {

2181 if (SeenMultiUse)

2182 ++NumGEPs;

2183 SeenMultiUse = true;

2184 }

2185 }

2186 };

2189 return NumGEPs > 2;

2190}

2191

2192

2193

2194

2196 Type *Ty, bool IsNUW) {

2198 if (Base.Ptr || Base.isExpensive())

2199 return nullptr;

2200

2201

2202

2203

2204 bool RewriteGEPs = Base.LHSGEPs.empty() && Base.RHSGEPs.empty();

2205

2206 Type *IdxTy = DL.getIndexType(LHS->getType());

2207 Value *Result = EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, RewriteGEPs);

2208 Value *Offset2 = EmitGEPOffsets(Base.RHSGEPs, Base.RHSNW, IdxTy, RewriteGEPs);

2209

2210

2211

2213 if (IsNUW && match(Offset2, m_Zero()) && Base.LHSNW.isInBounds() &&

2214 (I->use_empty() || I->hasOneUse()) && I->hasNoSignedWrap() &&

2215 I->hasNoUnsignedWrap() &&

2216 ((I->getOpcode() == Instruction::Mul &&

2218 I->getOpcode() == Instruction::Shl))

2220

2221

2222

2223

2225 Result =

2226 Builder.CreateSub(Result, Offset2, "gepdiff",

2227 IsNUW && Base.LHSNW.hasNoUnsignedWrap() &&

2228 Base.RHSNW.hasNoUnsignedWrap(),

2229 Base.LHSNW.isInBounds() && Base.RHSNW.isInBounds());

2230 }

2231

2232 return Builder.CreateIntCast(Result, Ty, true);

2233}

2234

2237 Value *Op0 = I.getOperand(0);

2238 Value *Op1 = I.getOperand(1);

2239 Type *Ty = I.getType();

2242 return nullptr;

2243

2244

2245

2253 }

2254

2255

2256

2260 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Y, Z});

2261 return BinaryOperator::CreateAdd(X, USub);

2262 }

2264 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Z, Y});

2265 return BinaryOperator::CreateAdd(X, USub);

2266 }

2267 }

2268

2269

2270

2276 }

2277

2278 return nullptr;

2279}

2280

2283 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),

2284 SQ.getWithInstruction(&I)))

2286

2288 return X;

2289

2291 return Phi;

2292

2293 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

2294

2295

2296

2297 if (Value *V = dyn_castNegVal(Op1)) {

2298 BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

2299

2301 assert(BO->getOpcode() == Instruction::Sub &&

2302 "Expected a subtraction operator!");

2303 if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())

2305 } else {

2306 if (cast(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())

2308 }

2309

2310 return Res;

2311 }

2312

2313

2315 return R;

2316

2321

2322

2324

2325

2326 bool WillNotSOV = willNotOverflowSignedSub(C, C2, I);

2330 Res->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO1->hasNoSignedWrap() &&

2331 WillNotSOV);

2333 OBO1->hasNoUnsignedWrap());

2334 return Res;

2335 }

2336 }

2337

2338 auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {

2339 if (Instruction *Ext = narrowMathIfNoOverflow(I))

2340 return Ext;

2341

2343 if (I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {

2345 I.setHasNoSignedWrap(true);

2346 }

2347 if (I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {

2349 I.setHasNoUnsignedWrap(true);

2350 }

2351

2352 return Changed ? &I : nullptr;

2353 };

2354

2355

2356

2357

2362 I.hasNoSignedWrap(),

2363 Op1, *this))

2364 return BinaryOperator::CreateAdd(NegOp1, Op0);

2365 }

2366 if (IsNegation)

2367 return TryToNarrowDeduceFlags();

2368

2369

2372

2373 if (I.getType()->isIntOrIntVectorTy(1))

2374 return BinaryOperator::CreateXor(Op0, Op1);

2375

2376

2379

2380

2383 return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);

2384

2385

2391 return BinaryOperator::CreateAnd(

2393 }

2394

2395

2396

2397

2403 return BinaryOperator::CreateSub(XZ, YW);

2404 }

2405

2406

2410 bool HasNSW = HasNUW && I.hasNoSignedWrap() && LHSSub->hasNoSignedWrap();

2411 Value *Add = Builder.CreateAdd(Y, Op1, "", HasNUW,

2412 HasNSW);

2414 Sub->setHasNoUnsignedWrap(HasNUW);

2415 Sub->setHasNoSignedWrap(HasNSW);

2416 return Sub;

2417 }

2418

2419 {

2420

2421

2422

2423

2426 return BinaryOperator::CreateSub(X, Y);

2427

2428

2434 return BinaryOperator::CreateAdd(OpsSub, ConstsSub);

2435 }

2436 }

2437

2438 {

2443 if (W == Y)

2444 R = BinaryOperator::CreateSub(X, Z);

2445 else if (W == Z)

2446 R = BinaryOperator::CreateSub(X, Y);

2447 else if (X == Y)

2448 R = BinaryOperator::CreateSub(W, Z);

2449 else if (X == Z)

2450 R = BinaryOperator::CreateSub(W, Y);

2451 if (R) {

2452 bool NSW = I.hasNoSignedWrap() &&

2455

2456 bool NUW = I.hasNoUnsignedWrap() &&

2458 R->setHasNoSignedWrap(NSW);

2459 R->setHasNoUnsignedWrap(NUW);

2460 return R;

2461 }

2462 }

2463 }

2464

2465

2466 {

2467

2468

2469 bool ConsumesOp0, ConsumesOp1;

2471 isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&

2472 (ConsumesOp0 || ConsumesOp1)) {

2475 assert(NotOp0 != nullptr && NotOp1 != nullptr &&

2476 "isFreeToInvert desynced with getFreelyInverted");

2477 return BinaryOperator::CreateSub(NotOp1, NotOp0);

2478 }

2479 }

2480

2481 auto m_AddRdx = [](Value *&Vec) {

2483 };

2485 if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&

2487

2488

2490 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add,

2491 {Sub->getType()}, {Sub});

2493 }

2494

2498

2501

2503

2504

2507

2508

2511 return R;

2512

2513

2516 return R;

2517

2519

2520

2523 }

2524

2525 const APInt *Op0C;

2527 if (Op0C->isMask()) {

2528

2529

2530

2532 Op1, SQ.getWithInstruction(&I).getWithoutDomCondCache());

2533 if ((*Op0C | RHSKnown.Zero).isAllOnes())

2534 return BinaryOperator::CreateXor(Op1, Op0);

2535 }

2536

2537

2538

2539

2540

2541 const APInt *C2, *C3;

2546 APInt C2AndC3 = *C2 & *C3;

2547 APInt C2AndC3Minus1 = C2AndC3 - 1;

2548 APInt C2AddC3 = *C2 + *C3;

2549 if ((*C3 - C2AndC3Minus1).isPowerOf2() &&

2550 C2AndC3Minus1.isSubsetOf(C2AddC3)) {

2551 Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(), *C2));

2552 return BinaryOperator::CreateAdd(

2553 And, ConstantInt::get(I.getType(), *Op0C - C2AndC3));

2554 }

2555 }

2556 }

2557

2558 {

2560

2563

2564

2567 }

2568

2569

2570 {

2574 return BinaryOperator::CreateXor(A, B);

2575 }

2576

2577

2578 {

2582 return BinaryOperator::CreateAnd(A, B);

2583 }

2584

2585

2586 {

2590 return BinaryOperator::CreateOr(A, B);

2591 }

2592

2593

2594 {

2598 (Op0->hasOneUse() || Op1->hasOneUse()))

2600 }

2601

2602

2603 {

2607 return BinaryOperator::CreateAnd(A, B);

2608 }

2609

2610

2611 {

2615 (Op0->hasOneUse() || Op1->hasOneUse()))

2617 }

2618

2619 {

2621

2623 return BinaryOperator::CreateAnd(

2624 Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));

2625 }

2626

2627 {

2628

2634 }

2635 }

2636

2637 {

2638

2643 }

2644 }

2645

2646 {

2647

2648

2650 auto m_SubXorCmp = [&C, &X](Value *LHS, Value *RHS) {

2654 };

2655 if (m_SubXorCmp(Op0, Op1))

2657 if (m_SubXorCmp(Op1, Op0))

2659 }

2660

2662 return R;

2663

2665 return R;

2666

2667 {

2668

2669

2670

2671

2672

2673

2674

2675

2676

2677

2678 auto SinkSubIntoSelect =

2681 Value *Cond, *TrueVal, *FalseVal;

2684 return nullptr;

2685 if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)

2686 return nullptr;

2687

2688

2689

2690 bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;

2691 Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);

2695 OtherHandOfSubIsTrueVal ? NewSub : Zero);

2696

2698 return NewSel;

2699 };

2700 if (Instruction *NewSel = SinkSubIntoSelect(

2701 Op0, Op1,

2703 return Builder->CreateSub(OtherHandOfSelect,

2704 Op1);

2705 }))

2706 return NewSel;

2707 if (Instruction *NewSel = SinkSubIntoSelect(

2708 Op1, Op0,

2710 return Builder->CreateSub(Op0,

2711 OtherHandOfSelect);

2712 }))

2713 return NewSel;

2714 }

2715

2716

2719 return BinaryOperator::CreateAnd(

2720 Op0, Builder.CreateNot(Y, Y->getName() + ".not"));

2721

2722

2723

2724

2725

2726

2727

2728

2733 return BinaryOperator::CreateSub(Not, X);

2734 }

2737 !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {

2739 return BinaryOperator::CreateSub(X, Not);

2740 }

2741

2742

2743

2744

2747 I.getType()->getScalarSizeInBits() != 1 &&

2748 (Op0->hasOneUse() || Op1->hasOneUse())) {

2751 }

2754 I.getType()->getScalarSizeInBits() != 1 &&

2755 (Op0->hasOneUse() || Op1->hasOneUse())) {

2758 }

2759

2760

2761

2762 Value *LHSOp, *RHSOp;

2766 I.hasNoUnsignedWrap()))

2768

2769

2773 false))

2775

2776 auto MatchSubOfZExtOfPtrToIntOrAddr = [&]() {

2779 return true;

2782 return true;

2783

2784

2787 return true;

2788 return false;

2789 };

2790 if (MatchSubOfZExtOfPtrToIntOrAddr()) {

2792 if (GEP->getPointerOperand() == RHSOp) {

2793 if (GEP->hasNoUnsignedWrap() || GEP->hasNoUnsignedSignedWrap()) {

2795 Value *Res = GEP->hasNoUnsignedWrap()

2797 Offset, I.getType(), "",

2798 GEP->hasNoUnsignedSignedWrap())

2801 }

2802 }

2803 }

2804 }

2805

2806

2807

2808

2809

2810

2812 const APInt *ShAmt;

2813 Type *Ty = I.getType();

2814 unsigned BitWidth = Ty->getScalarSizeInBits();

2816 Op1->hasNUses(2) && *ShAmt == BitWidth - 1 &&

2818

2819

2820

2822

2823 Value *NegA = I.hasNoUnsignedWrap()

2825 : Builder.CreateNeg(A, "", I.hasNoSignedWrap());

2827 }

2828

2829

2830

2831

2832

2833 const APInt *AddC, *AndC;

2838 if ((HighMask & *AndC).isZero())

2839 return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));

2840 }

2841

2844 return V;

2845

2846

2850 I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y}));

2851

2852

2853

2854

2857 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1}));

2858

2859

2862 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X}));

2863

2864

2866 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0});

2868 }

2869

2870

2872 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X});

2874 }

2875

2876

2880 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},

2881 {Builder.CreateNot(X)}));

2882

2883

2884

2889 bool PropagateNSW = I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() &&

2890 OBO1->hasNoSignedWrap() && BitWidth > 2;

2891 bool PropagateNUW = I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() &&

2892 OBO1->hasNoUnsignedWrap() && BitWidth > 1;

2893 Value *Add = Builder.CreateAdd(X, Y, "add", PropagateNUW, PropagateNSW);

2894 Value *Sub = Builder.CreateSub(X, Y, "sub", PropagateNUW, PropagateNSW);

2897 }

2898

2899

2902 if (I.hasNoUnsignedWrap() || I.hasNoSignedWrap()) {

2904 Builder.CreateSub(X, Y, "sub", false, true);

2906 Builder.CreateBinaryIntrinsic(Intrinsic::abs, Sub, Builder.getTrue());

2908 }

2909 }

2910

2912 return Res;

2913

2914

2919 }

2920

2921

2922

2923 {

2924 Value *Z, *Add0, *Add1;

2931 unsigned NumOfNewInstrs = 0;

2932

2935

2936 unsigned NumOfDeadInstrs = 0;

2938

2939

2940

2941 ++NumOfDeadInstrs;

2942 NumOfDeadInstrs += Add0->hasOneUse() ? 1 : 0;

2943 }

2944 if (Op1->hasOneUse()) {

2945 ++NumOfDeadInstrs;

2946 NumOfDeadInstrs += Add1->hasOneUse() ? 1 : 0;

2947 }

2948 if (NumOfDeadInstrs >= NumOfNewInstrs) {

2950 Value *SExtZ = Builder.CreateSExt(Z, I.getType());

2952 false,

2953 I.hasNoSignedWrap());

2955 }

2956 }

2957 }

2958

2959 return TryToNarrowDeduceFlags();

2960}

2961

2962

2963

2965

2966

2967

2970 return nullptr;

2971

2974

2975

2976

2985 }

2986

2990

2994

2995

2996

2997

3002 return FDiv;

3003 }

3004

3005

3009

3010 return nullptr;

3011}

3012

3013Instruction *InstCombinerImpl::hoistFNegAboveFMulFDiv(Value *FNegOp,

3014 Instruction &FMFSource) {

3017

3018

3020 X, Builder.CreateFNegFMF(Y, &FMFSource), &FMFSource));

3021 }

3022

3025 Builder.CreateFNegFMF(X, &FMFSource), Y, &FMFSource));

3026 }

3027

3029

3030 if (II->getIntrinsicID() == Intrinsic::ldexp) {

3031 FastMathFlags FMF = FMFSource.getFastMathFlags() | II->getFastMathFlags();

3032 CallInst *New =

3033 Builder.CreateCall(II->getCalledFunction(),

3034 {Builder.CreateFNegFMF(II->getArgOperand(0), FMF),

3035 II->getArgOperand(1)});

3036 New->setFastMathFlags(FMF);

3037 New->copyMetadata(*II);

3038 return New;

3039 }

3040 }

3041

3042 return nullptr;

3043}

3044

3046 Value *Op = I.getOperand(0);

3047

3051

3053 return X;

3054

3056

3057

3058 if (I.hasNoSignedZeros() &&

3061

3064 return nullptr;

3065

3066 if (Instruction *R = hoistFNegAboveFMulFDiv(OneUse, I))

3068

3069

3072

3073

3074

3075 auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) {

3078 FastMathFlags FMF = I.getFastMathFlags() | OldSel->getFastMathFlags();

3080 if (!OldSel->hasNoSignedZeros() && !CommonOperand &&

3083 }

3084 };

3085

3088 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");

3090 propagateSelectFMF(NewSel, P == Y);

3091 return NewSel;

3092 }

3093

3095 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");

3097 propagateSelectFMF(NewSel, P == X);

3098 return NewSel;

3099 }

3100

3101

3102

3104 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");

3105 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");

3107 propagateSelectFMF(NewSel, true);

3108 return NewSel;

3109 }

3110 }

3111

3112

3114

3115

3119 Value *NewCopySign = Builder.CreateCopySign(X, NegY, FMF);

3121 }

3122

3123

3127

3128

3132 }

3133

3134 return nullptr;

3135}

3136

3139 I.getFastMathFlags(),

3142

3144 return X;

3145

3147 return Phi;

3148

3149

3150

3151

3152

3153

3154

3155

3159

3161 return X;

3162

3163 if (Instruction *R = foldFBinOpOfIntCasts(I))

3164 return R;

3165

3168

3169 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

3170

3171

3172

3173

3174

3175

3176 if (I.hasNoSignedZeros() ||

3181 }

3182 }

3183

3184

3189 }

3190

3194 return NV;

3195

3196

3197

3198

3202

3203

3206

3207

3208

3209 Type *Ty = I.getType();

3212

3213

3216

3217

3218

3219

3223 }

3224

3225

3230 }

3231

3232

3235

3236 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {

3237

3240

3241

3242

3245

3246

3249 Instruction::FSub, C, ConstantFP::get(Ty, 1.0), DL))

3251 }

3252

3255 Instruction::FSub, ConstantFP::get(Ty, 1.0), C, DL))

3257 }

3258

3259

3260

3261

3268 }

3269

3270 auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {

3273 };

3274 Value *A0, *A1, *V0, *V1;

3275 if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&

3277

3278

3280 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,

3281 {Sub->getType()}, {A0, Sub}, &I);

3283 }

3284

3286 return F;

3287

3288

3289

3290

3291

3294

3295

3299 }

3300 }

3301

3302 return nullptr;

3303}

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

static bool isConstant(const MachineInstr &MI)

AMDGPU Register Bank Select

This file declares a class to represent arbitrary precision floating point values and provide a varie...

This file implements a class to represent arbitrary precision integral constant values and operations...

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

This file contains the declarations for the subclasses of Constant, which represent the different fla...

static Instruction * factorizeFAddFSub(BinaryOperator &I, InstCombiner::BuilderTy &Builder)

Factor a common operand out of fadd/fsub of fmul/fdiv.

Definition InstCombineAddSub.cpp:1944

static Instruction * foldAddToAshr(BinaryOperator &Add)

Try to reduce signed division by power-of-2 to an arithmetic shift right.

Definition InstCombineAddSub.cpp:1280

static bool MatchMul(Value *E, Value *&Op, APInt &C)

Definition InstCombineAddSub.cpp:1084

static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned)

Definition InstCombineAddSub.cpp:1124

static Instruction * foldFNegIntoConstant(Instruction &I, const DataLayout &DL)

This eliminates floating-point negation in either 'fneg(X)' or 'fsub(-0.0, X)' form by combining into...

Definition InstCombineAddSub.cpp:2964

static Instruction * combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder, const BinaryOperator &I)

Definition InstCombineAddSub.cpp:1267

static Instruction * factorizeLerp(BinaryOperator &I, InstCombiner::BuilderTy &Builder)

Eliminate an op from a linear interpolation (lerp) pattern.

Definition InstCombineAddSub.cpp:1928

static Instruction * foldSubOfMinMax(BinaryOperator &I, InstCombiner::BuilderTy &Builder)

Definition InstCombineAddSub.cpp:2235

static Instruction * foldBoxMultiply(BinaryOperator &I)

Reduce a sequence of masked half-width multiplies to a single multiply.

Definition InstCombineAddSub.cpp:1484

static Value * checkForNegativeOperand(BinaryOperator &I, InstCombiner::BuilderTy &Builder)

Definition InstCombineAddSub.cpp:752

static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned)

Definition InstCombineAddSub.cpp:1145

static Instruction * foldNoWrapAdd(BinaryOperator &Add, InstCombiner::BuilderTy &Builder)

Wrapping flags may allow combining constants separated by an extend.

Definition InstCombineAddSub.cpp:809

static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A, Value *&B)

Definition InstCombineAddSub.cpp:1027

static Instruction * factorizeMathWithShlOps(BinaryOperator &I, InstCombiner::BuilderTy &Builder)

This is a specialization of a more general transform from foldUsingDistributiveLaws.

Definition InstCombineAddSub.cpp:1448

static Instruction * canonicalizeLowbitMask(BinaryOperator &I, InstCombiner::BuilderTy &Builder)

Fold (1 << NBits) - 1 Into: ~(-(1 << NBits)) Because a 'not' is better for bit-tracking analysis and ...

Definition InstCombineAddSub.cpp:1223

static Instruction * foldToUnsignedSaturatedAdd(BinaryOperator &I)

Definition InstCombineAddSub.cpp:1241

static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned)

Definition InstCombineAddSub.cpp:1102

This file provides internal interfaces used to implement the InstCombine.

This file provides the interface for the instcombine pass implementation.

static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)

uint64_t IntrinsicInst * II

const SmallVectorImpl< MachineOperand > & Cond

This file defines the SmallVector class.

static unsigned getScalarSizeInBits(Type *Ty)

static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")

static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")

const fltSemantics & getSemantics() const

opStatus multiply(const APFloat &RHS, roundingMode RM)

Class for arbitrary precision integers.

LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const

bool isNegatedPowerOf2() const

Check if this APInt's negated value is a power of two greater than zero.

bool isMinSignedValue() const

Determine if this is the smallest signed value.

LLVM_ABI APInt trunc(unsigned width) const

Truncate to new width.

static APInt getMaxValue(unsigned numBits)

Gets maximum unsigned value of APInt for specific bit width.

bool isZero() const

Determine if this value is zero, i.e. all bits are clear.

bool isSignMask() const

Check if the APInt's value is returned by getSignMask.

unsigned getBitWidth() const

Return the number of bits in the APInt.

bool isNegative() const

Determine sign of this APInt.

int32_t exactLogBase2() const

unsigned countr_zero() const

Count the number of trailing zero bits.

unsigned countl_zero() const

The APInt version of std::countl_zero.

static APInt getSignedMinValue(unsigned numBits)

Gets minimum signed value of APInt for a specific bit width.

unsigned logBase2() const

LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const

bool isMask(unsigned numBits) const

LLVM_ABI APInt sext(unsigned width) const

Sign extend to a new width.

bool isSubsetOf(const APInt &RHS) const

This operation checks that all bits set in this APInt are also set in RHS.

bool isPowerOf2() const

Check if this APInt's value is a power of two greater than zero.

static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)

Constructs an APInt value that has the top hiBitsSet bits set.

bool sge(const APInt &RHS) const

Signed greater or equal comparison.

bool isOne() const

Determine if this is a value of 1.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")

static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...

static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)

static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")

static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")

static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")

static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a Trunc or BitCast cast instruction.

static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...

@ ICMP_UGT

unsigned greater than

@ ICMP_SGT

signed greater than

An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...

static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)

static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)

ConstantFP - Floating Point Values [float, double].

const APFloat & getValueAPF() const

bool isZero() const

Return true if the value is positive or negative zero.

static ConstantInt * getSigned(IntegerType *Ty, int64_t V)

Return a ConstantInt with the specified value for the specified type.

This is an important base class in LLVM.

static LLVM_ABI Constant * getAllOnesValue(Type *Ty)

static LLVM_ABI Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

LLVM_ABI bool isElementWiseEqual(Value *Y) const

Return true if this constant and a constant 'Y' are element-wise equal.

A parsed version of the target data layout string in and methods for querying it.

Convenience struct for specifying and reasoning about fast-math flags.

static FastMathFlags intersectRewrite(FastMathFlags LHS, FastMathFlags RHS)

Intersect rewrite-based flags.

bool noSignedZeros() const

static FastMathFlags unionValue(FastMathFlags LHS, FastMathFlags RHS)

Union value flags.

void setNoInfs(bool B=true)

static bool isLT(Predicate P)

Return true if the predicate is SLT or ULT.

static bool isGT(Predicate P)

Return true if the predicate is SGT or UGT.

Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)

Tries to simplify binops of select and cast of the select condition.

Instruction * visitAdd(BinaryOperator &I)

Definition InstCombineAddSub.cpp:1523

Instruction * canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(BinaryOperator &I)

Definition InstCombineAddSub.cpp:1349

Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)

This is a convenience wrapper function for the above two functions.

bool SimplifyAssociativeOrCommutative(BinaryOperator &I)

Performs a few simplifications for operators which are associative or commutative.

Value * foldUsingDistributiveLaws(BinaryOperator &I)

Tries to simplify binary operations which some other binary operation distributes over.

Instruction * foldBinOpShiftWithShift(BinaryOperator &I)

Instruction * foldSquareSumInt(BinaryOperator &I)

Definition InstCombineAddSub.cpp:1060

Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)

Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...

Instruction * foldSquareSumFP(BinaryOperator &I)

Definition InstCombineAddSub.cpp:1071

Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)

Given an instruction with a select as one operand and a constant as the other operand,...

Instruction * visitSub(BinaryOperator &I)

Definition InstCombineAddSub.cpp:2281

Value * OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty, bool isNUW)

Optimize pointer differences into the same array into a size.

Definition InstCombineAddSub.cpp:2195

Instruction * visitFAdd(BinaryOperator &I)

Definition InstCombineAddSub.cpp:1989

Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)

For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.

Instruction * foldAddLikeCommutative(Value *LHS, Value *RHS, bool NSW, bool NUW)

Common transforms for add / disjoint or.

Definition InstCombineAddSub.cpp:1318

Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)

Value * SimplifyAddWithRemainder(BinaryOperator &I)

Tries to simplify add operations using the definition of remainder.

Definition InstCombineAddSub.cpp:1158

Instruction * foldAddWithConstant(BinaryOperator &Add)

Definition InstCombineAddSub.cpp:856

Instruction * foldVectorBinop(BinaryOperator &Inst)

Canonicalize the position of binops relative to shufflevector.

Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)

Instruction * visitFNeg(UnaryOperator &I)

Definition InstCombineAddSub.cpp:3045

Instruction * visitFSub(BinaryOperator &I)

Definition InstCombineAddSub.cpp:3137

IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy

An IRBuilder that automatically inserts new instructions into the worklist.

bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)

Return true if the specified value is free to invert (apply ~ to).

unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const

Instruction * replaceInstUsesWith(Instruction &I, Value *V)

A combiner-aware RAUW-like routine.

static Constant * SubOne(Constant *C)

Subtract one from a Constant.

InstructionWorklist & Worklist

A worklist of the instructions that need to be simplified.

Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)

Replace operand of instruction and add old operand to the worklist.

Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)

const SimplifyQuery & getSimplifyQuery() const

static Constant * AddOne(Constant *C)

Add one to a Constant.

LLVM_ABI void setHasNoUnsignedWrap(bool b=true)

Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.

LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY

Determine whether the no unsigned wrap flag is set.

LLVM_ABI void copyFastMathFlags(FastMathFlags FMF)

Convenience function for transferring all fast-math flag values to this instruction,...

LLVM_ABI void setHasNoSignedZeros(bool B)

Set or clear the no-signed-zeros flag on this instruction, which must be an operator which supports t...

LLVM_ABI void setHasNoSignedWrap(bool b=true)

Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

LLVM_ABI void setFastMathFlags(FastMathFlags FMF)

Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...

LLVM_ABI void setHasNoInfs(bool B)

Set or clear the no-infs flag on this instruction, which must be an operator which supports this flag...

LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY

Convenience function for getting all the fast-math flags, which must be an operator which supports th...

void setDebugLoc(DebugLoc Loc)

Set the debug location information for this instruction.

LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())

Copy metadata from SrcInst to this instruction.

static Value * Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC)

Attempt to negate Root.

Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.

bool hasNoSignedWrap() const

Test whether this operation is known to never undergo signed overflow, aka the nsw property.

bool hasNoUnsignedWrap() const

Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.

This class represents the LLVM 'select' instruction.

static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)

This instruction constructs a fixed permutation of two input vectors.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

bool contains(ConstPtrType Ptr) const

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY

If this is a vector type, return the getPrimitiveSizeInBits value for the element type.

static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

bool hasOneUse() const

Return true if there is exactly one use of this value.

LLVM_ABI bool hasNUsesOrMore(unsigned N) const

Return true if this value has N uses or more.

LLVM_ABI StringRef getName() const

Return a constant reference to the value's name.

This class represents zero extension of integer types.

@ C

The default llvm calling convention, compatible with C.

LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})

Look up the Function declaration of the intrinsic id in the Module M.

SpecificConstantMatch m_ZeroInt()

Convenience matchers for specific integer values.

BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)

Matches a register negated by a G_SUB.

BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)

Matches a register not-ed by a G_XOR.

OneUse_match< SubPat > m_OneUse(const SubPat &SP)

cst_pred_ty< is_all_ones > m_AllOnes()

Match an integer or vector with all bits set.

class_match< PoisonValue > m_Poison()

Match an arbitrary poison constant.

cst_pred_ty< is_lowbit_mask > m_LowBitMask()

Match an integer or vector with only the low bit(s) set.

BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)

PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)

BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)

class_match< BinaryOperator > m_BinOp()

Match an arbitrary binary operation and ignore it.

BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)

Matches FMul with LHS and RHS in either order.

BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)

auto m_PtrToIntOrAddr(const OpTy &Op)

Matches PtrToInt or PtrToAddr.

BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)

cst_pred_ty< is_power2 > m_Power2()

Match an integer or vector power-of-2.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap, true > m_c_NSWAdd(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)

match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)

CommutativeBinaryIntrinsic_match< IntrID, T0, T1 > m_c_Intrinsic(const T0 &Op0, const T1 &Op1)

class_match< Constant > m_Constant()

Match an arbitrary Constant and ignore it.

ap_match< APInt > m_APInt(const APInt *&Res)

Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.

BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)

Matches an And with LHS and RHS in either order.

CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)

Matches Trunc.

BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)

CastOperator_match< OpTy, Instruction::PtrToAddr > m_PtrToAddr(const OpTy &Op)

Matches PtrToAddr.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)

specific_intval< false > m_SpecificInt(const APInt &V)

Match a specific integer value or vector with all elements equal to the value.

BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)

match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)

bool match(Val *V, const Pattern &P)

bind_ty< Instruction > m_Instruction(Instruction *&I)

Match an instruction, capturing it if we match.

cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()

Match a floating-point negative zero or positive zero.

specificval_ty m_Specific(const Value *V)

Match if we have a specific specified value.

DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)

specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)

ap_match< APFloat > m_APFloat(const APFloat *&Res)

Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat.

CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)

Matches an ICmp with a predicate over LHS and RHS in either order.

cst_pred_ty< is_nonnegative > m_NonNegative()

Match an integer or vector of non-negative values.

cst_pred_ty< is_one > m_One()

Match an integer 1 or a vector with all elements equal to 1.

IntrinsicID_match m_Intrinsic()

Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))

ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)

Matches SelectInst.

match_combine_or< CastInst_match< OpTy, SExtInst >, OpTy > m_SExtOrSelf(const OpTy &Op)

specific_fpval m_SpecificFP(double V)

Match a specific floating point value or vector with all elements equal to the value.

BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)

Matches an Xor with LHS and RHS in either order.

BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)

deferredval_ty< Value > m_Deferred(Value *const &V)

Like m_Specific(), but works if the specific value to match is determined as part of the same match()...

MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)

Matches an SMin with LHS and RHS in either order.

TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)

Matches ShuffleVectorInst independently of mask value.

MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)

Matches a UMax with LHS and RHS in either order.

ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)

Match Select(C, LHS, RHS) or Select(C, RHS, LHS)

CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)

SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)

CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)

Matches ZExt.

BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)

MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)

cst_pred_ty< is_negated_power2 > m_NegatedPower2()

Match an integer or vector negated power-of-2.

match_immconstant_ty m_ImmConstant()

Match an arbitrary immediate Constant and ignore it.

specific_fpval m_FPOne()

Match a float 1.0 or vector with all elements equal to 1.0.

MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)

Matches a UMin with LHS and RHS in either order.

BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)

Matches a Add with LHS and RHS in either order.

match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)

Match either "add" or "or disjoint".

MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)

Matches an SMax with LHS and RHS in either order.

match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)

match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)

Match either "sext" or "zext nneg".

BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)

OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)

match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)

Match either "add nsw" or "or disjoint".

class_match< Value > m_Value()

Match an arbitrary value and ignore it.

AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)

Matches a BinaryOperator with LHS and RHS in either order.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)

CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)

match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)

FNeg_match< OpTy > m_FNeg(const OpTy &X)

Match 'fneg X' as 'fsub -0.0, X'.

BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)

Matches FAdd with LHS and RHS in either order.

BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::FDiv > m_FDiv(const LHS &L, const RHS &R)

m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)

BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)

Matches integer remainder operations.

CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)

BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)

CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)

Matches SExt.

is_zero m_Zero()

Match any null constant or a vector with all elements equal to 0.

BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)

Matches an Or with LHS and RHS in either order.

match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)

Match either "add nuw" or "or disjoint".

BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)

Matches a Mul with LHS and RHS in either order.

m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)

CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)

Matches PtrToInt.

MatchFunctor< Val, Pattern > match_fn(const Pattern &P)

A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.

BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)

MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)

match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)

Combine two pattern matchers matching L || R.

@ CE

Windows NT (Windows on ARM)

Context & getContext() const

friend class Instruction

Iterator for Instructions in a `BasicBlock`.

This is an optimization pass for GlobalISel generic memory operations.

LLVM_ABI bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)

Return true if LHS and RHS have no common bits set.

LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)

FunctionAddr VTableAddr Value

constexpr bool isInt(int64_t x)

Checks if an integer fits into the given bit width.

LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)

Given an exploded icmp instruction, return true if the comparison only checks the sign bit.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt & operator+=(DynamicAPInt &A, int64_t B)

LLVM_ABI bool canIgnoreSignBitOfZero(const Use &U)

Return true if the sign bit of the FP value can be ignored by the user when the value is zero.

LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)

Returns true if V cannot be undef, but may be poison.

LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)

Return true if 'V & Mask' is known to be zero.

LLVM_ABI Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)

Given operands for a Sub, fold the result or return null.

LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)

Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start, ...], [Inc, ...]; Inc = binop iv, Step.

LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)

Given operands for an Add, fold the result or return null.

LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt & operator*=(DynamicAPInt &A, int64_t B)

LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)

Attempt to constant fold a unary operation with the specified operand.

LLVM_ABI Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)

Given operand for an FNeg, fold the result or return null.

LLVM_ABI Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)

Given operands for an FSub, fold the result or return null.

decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)

LLVM_ABI Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)

Given operands for an FAdd, fold the result or return null.

LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)

Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.

LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)

Return true if we can prove that the specified FP value is never equal to -0.0.

bool none_of(R &&Range, UnaryPredicate P)

Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)

Attempt to constant fold a binary operation with the specified operands.

LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)

Return true if the given value is known to be non-zero when defined.

@ First

Helpers to iterate all locations in the MemoryEffectsBase class.

@ Mul

Product of integers.

@ SMin

Signed integer min implemented in terms of select(cmp()).

@ Sub

Subtraction of integers.

@ UMax

Unsigned integer max implemented in terms of select(cmp()).

DWARFExpression::Operation Op

RoundingMode

Rounding mode.

LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)

Return true if this function can prove that V does not have undef bits and is never poison.

constexpr unsigned BitWidth

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

A suitably aligned and sized character array member which can hold elements of any type.

Value * Ptr

Common base pointer.

SmallVector< GEPOperator * > RHSGEPs

RHS GEPs until common base.

SmallVector< GEPOperator * > LHSGEPs

LHS GEPs until common base.

bool isExpensive() const

Whether expanding the GEP chains is expensive.

Definition InstCombineAddSub.cpp:2172

static CommonPointerBase compute(Value *LHS, Value *RHS)

Definition InstCombineAddSub.cpp:2112