LLVM: lib/Transforms/InstCombine/InstructionCombining.cpp Source File


#include
#include
#include
#include
#include
#include
#include

#define DEBUG_TYPE "instcombine"
#include

using namespace llvm;

          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
          "Number of functions with four or more iterations");

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc , "Number of reassociations");
          "Controls which instructions are visited");

    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

    cl::desc("Maximum array size considered when doing a combine"));


std::optional<Instruction *>

  if (II.getCalledFunction()->isTargetIntrinsic()) {
  }
  return std::nullopt;
}

                                    bool &KnownBitsComputed) {

  if (II.getCalledFunction()->isTargetIntrinsic()) {
        *this, II, DemandedMask, Known, KnownBitsComputed);
  }
  return std::nullopt;
}

    APInt &PoisonElts2, APInt &PoisonElts3,
        SimplifyAndSetOp) {

  if (II.getCalledFunction()->isTargetIntrinsic()) {
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}

}

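/// Emit the byte offset computed by \p GEP. When \p RewriteGEP is set and the
/// GEP below is a one-use, all-constant-index i8 GEP, the emitted offset is
/// also used to rebuild the GEP while preserving its no-wrap flags.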
Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
  if (!RewriteGEP)

  auto *Inst = dyn_cast<Instruction>(GEP);
  if (Inst)

  if (Inst && GEP->hasOneUse() && GEP->hasAllConstantIndices() &&
      GEP->getSourceElementType()->isIntegerTy(8)) {
        Offset, "", GEP->getNoWrapFlags()));
  }
}

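/// Widths handled below (8, 16 and 32 bits) are treated as desirable
/// narrowing targets by the width-based shouldChangeType logic that follows.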
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  switch (BitWidth) {
  case 8:
  case 16:
  case 32:
    return true;
  default:
    return false;
  }
}

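/// Decide whether converting an integer computation from FromWidth to ToWidth
/// is worthwhile: prefer desirable narrowings, avoid moving from a legal or
/// desirable width to an illegal one, and avoid widening between two illegal
/// widths.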
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to a desirable narrower type regardless of legality.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // Do not convert from a legal or desirable type to an illegal one.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // Do not widen between two illegal types.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

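/// Type-based wrapper that defers to the width-based shouldChangeType above.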
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}

  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  const APInt *BVal, *CVal;
    return false;

  bool Overflow = false;
  switch (I.getOpcode()) {
  case Instruction::Add:
    (void)BVal->sadd_ov(*CVal, Overflow);
    break;
  case Instruction::Sub:
    (void)BVal->ssub_ov(*CVal, Overflow);
    break;
  case Instruction::Mul:
    (void)BVal->smul_ov(*CVal, Overflow);
    break;
  default:
    return false;
  }
  return !Overflow;
}

  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}

  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

    return false;

  if (!CastC2)
    return false;
  if (!FoldedC)
    return false;

  Cast->dropPoisonGeneratingFlags();
  return true;
}

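/// Try to collapse an inttoptr of a ptrtoint back to the original pointer
/// operand when the address-space and width checks below allow it.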
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
  Type *CastTy = IntToPtr->getDestTy();
  if (PtrToInt &&
          PtrToInt->getSrcTy()->getPointerAddressSpace() &&

    return PtrToInt->getOperand(0);
  }
  return nullptr;
}


430 bool Changed = false;

431

432 do {

433

434

435

436 if (I.isCommutative() && getComplexity(I.getOperand(0)) <

438 Changed = I.swapOperands();

439

440 if (I.isCommutative()) {

441 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {

444 Changed = true;

445 }

446 }

447

448 BinaryOperator *Op0 = dyn_cast(I.getOperand(0));

449 BinaryOperator *Op1 = dyn_cast(I.getOperand(1));

450

451 if (I.isAssociative()) {

452

453 if (Op0 && Op0->getOpcode() == Opcode) {

456 Value *C = I.getOperand(1);

457

458

460

465

466

467

468

470

471

472

473 if (IsNUW)

474 I.setHasNoUnsignedWrap(true);

475

476 if (IsNSW)

477 I.setHasNoSignedWrap(true);

478

479 Changed = true;

480 ++NumReassoc;

481 continue;

482 }

483 }

484

485

486 if (Op1 && Op1->getOpcode() == Opcode) {

487 Value *A = I.getOperand(0);

490

491

493

496

497

499 Changed = true;

500 ++NumReassoc;

501 continue;

502 }

503 }

504 }

505

506 if (I.isAssociative() && I.isCommutative()) {

508 Changed = true;

509 ++NumReassoc;

510 continue;

511 }

512

513

514 if (Op0 && Op0->getOpcode() == Opcode) {

517 Value *C = I.getOperand(1);

518

519

521

524

525

527 Changed = true;

528 ++NumReassoc;

529 continue;

530 }

531 }

532

533

534 if (Op1 && Op1->getOpcode() == Opcode) {

535 Value *A = I.getOperand(0);

538

539

541

544

545

547 Changed = true;

548 ++NumReassoc;

549 continue;

550 }

551 }

552

553

554

557 if (Op0 && Op1 &&

565 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?

568

569 if (isa(NewBO)) {

574 }

579

580

582 if (IsNUW)

583 I.setHasNoUnsignedWrap(true);

584

585 Changed = true;

586 continue;

587 }

588 }

589

590

591 return Changed;

592 } while (true);

593}

594

595

596

599

600

601 if (LOp == Instruction::And)

602 return ROp == Instruction::Or || ROp == Instruction::Xor;

603

604

605 if (LOp == Instruction::Or)

606 return ROp == Instruction::And;

607

608

609

610 if (LOp == Instruction::Mul)

611 return ROp == Instruction::Add || ROp == Instruction::Sub;

612

613 return false;

614}

615

616

617

622

623

625

626

627

628

629}

630

631

632

634 if (isa(V))

635 return nullptr;

636

638}

639

640

641

642

643

644

648 assert(Op && "Expected a binary operator");

649 LHS = Op->getOperand(0);

650 RHS = Op->getOperand(1);

651 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {

654

656 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);

657 assert(RHS && "Constant folding of immediate constants failed");

658 return Instruction::Mul;

659 }

660

661 }

663 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&

665

666 return Instruction::AShr;

667 }

668 }

669 return Op->getOpcode();

670}

671

672

673

678 assert(A && B && C && D && "All values must be provided");

679

680 Value *V = nullptr;

681 Value *RetVal = nullptr;

682 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

684

685

687

688

690

691

692 if (A == C || (InnerCommutative && A == D)) {

693 if (A != C)

695

696

698

699

700

703 if (V)

704 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);

705 }

706 }

707

708

710

711

712 if (B == D || (InnerCommutative && B == C)) {

713 if (B != D)

715

716

718

719

720

723 if (V)

724 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);

725 }

726 }

727

728 if (!RetVal)

729 return nullptr;

730

731 ++NumFactor;

733

734

735 if (isa(RetVal)) {

736 bool HasNSW = false;

737 bool HasNUW = false;

738 if (isa(&I)) {

739 HasNSW = I.hasNoSignedWrap();

740 HasNUW = I.hasNoUnsignedWrap();

741 }

742 if (auto *LOBO = dyn_cast(LHS)) {

743 HasNSW &= LOBO->hasNoSignedWrap();

744 HasNUW &= LOBO->hasNoUnsignedWrap();

745 }

746

747 if (auto *ROBO = dyn_cast(RHS)) {

748 HasNSW &= ROBO->hasNoSignedWrap();

749 HasNUW &= ROBO->hasNoUnsignedWrap();

750 }

751

752 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {

753

754

755

756

757

758

759

760 const APInt *CInt;

762 cast(RetVal)->setHasNoSignedWrap(HasNSW);

763

764

765 cast(RetVal)->setHasNoUnsignedWrap(HasNUW);

766 }

767 }

768 return RetVal;

769}

770

771

772

773

774

775

776

777

778

780 unsigned Opc = I->getOpcode();

781 unsigned ConstIdx = 1;

782 switch (Opc) {

783 default:

784 return nullptr;

785

786

787

788 case Instruction::Sub:

789 ConstIdx = 0;

790 break;

791 case Instruction::ICmp:

792

793

794

795 if (cast(I)->isSigned())

796 return nullptr;

797 break;

798 case Instruction::Or:

800 return nullptr;

801 [[fallthrough]];

802 case Instruction::Add:

803 break;

804 }

805

807

808 if (match(I->getOperand(1 - ConstIdx),

810 return nullptr;

811

813

815 return nullptr;

816

817 Type *Ty = Op->getType();

819

820

821 if (Opc == Instruction::ICmp && !cast(I)->isEquality()) {

824 if (!Cmp || !Cmp->isZeroValue())

825 return nullptr;

826 }

827

828

829 bool Consumes = false;

831 return nullptr;

833 assert(NotOp != nullptr &&

834 "Desync between isFreeToInvert and getFreelyInverted");

835

837

838 Value *R = nullptr;

839

840

841

842 switch (Opc) {

843 case Instruction::Sub:

845 break;

846 case Instruction::Or:

847 case Instruction::Add:

849 break;

850 case Instruction::ICmp:

853 break;

854 default:

856 }

857 assert(R != nullptr);

859}


886 auto IsValidBinOpc = [](unsigned Opc) {

887 switch (Opc) {

888 default:

889 return false;

890 case Instruction::And:

891 case Instruction::Or:

892 case Instruction::Xor:

893 case Instruction::Add:

894

895

896 return true;

897 }

898 };

899

900

901

902 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,

903 unsigned ShOpc) {

904 assert(ShOpc != Instruction::AShr);

905 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||

906 ShOpc == Instruction::Shl;

907 };

908

909 auto GetInvShift = [](unsigned ShOpc) {

910 assert(ShOpc != Instruction::AShr);

911 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;

912 };

913

914 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,

915 unsigned ShOpc, Constant *CMask,

917

918 if (BinOpc1 == Instruction::And)

919 return true;

920

921

922

923 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))

924 return false;

925

926

927

928

929 if (BinOpc2 == Instruction::And)

930 return true;

931

932

933

937 CMask;

938 };

939

940 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {

942 Value *X, *Y, *ShiftedX, *Mask, *Shift;

943 if (match(I.getOperand(ShOpnum),

945 return nullptr;

946 if (match(I.getOperand(1 - ShOpnum),

951 return nullptr;

952

953 auto *IY = dyn_cast(I.getOperand(ShOpnum));

954 auto *IX = dyn_cast(ShiftedX);

955 if (!IY || !IX)

956 return nullptr;

957

958

959 unsigned ShOpc = IY->getOpcode();

960 if (ShOpc != IX->getOpcode())

961 return nullptr;

962

963

964 auto *BO2 = dyn_cast(I.getOperand(1 - ShOpnum));

965 if (!BO2)

966 return nullptr;

967

968 unsigned BinOpc = BO2->getOpcode();

969

970 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))

971 return nullptr;

972

973 if (ShOpc == Instruction::AShr) {

975 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {

980 }

981

982 return nullptr;

983 }

984

985

986

987 if (BinOpc == I.getOpcode() &&

988 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {

993 }

994

995

996

998 return nullptr;

1000 return nullptr;

1001

1002

1003 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))

1004 return nullptr;

1005

1012 NewBinOp1, CShift);

1013 };

1014

1016 return R;

1017 return MatchBinOp(1);

1018}

1019

1020

1021

1022

1023

1024

1025

1026

1027

1028

1031

1032

1034 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

1035 Value *A, *CondVal, *TrueVal, *FalseVal;

1037

1038 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {

1040 A->getType()->getScalarSizeInBits() == 1 &&

1043 };

1044

1045

1046

1047 if (MatchSelectAndCast(LHS, RHS))

1048 CastOp = LHS;

1049 else if (MatchSelectAndCast(RHS, LHS))

1050 CastOp = RHS;

1051 else

1052 return nullptr;

1053

1054 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {

1055 bool IsCastOpRHS = (CastOp == RHS);

1056 bool IsZExt = isa(CastOp);

1058

1059 if (IsTrueArm) {

1061 } else if (IsZExt) {

1062 unsigned BitWidth = V->getType()->getScalarSizeInBits();

1064 } else {

1066 }

1067

1070 };

1071

1072

1073

1074 if (CondVal == A) {

1075 Value *NewTrueVal = NewFoldedConst(false, TrueVal);

1077 NewFoldedConst(true, FalseVal));

1078 }

1079

1081 Value *NewTrueVal = NewFoldedConst(true, TrueVal);

1083 NewFoldedConst(false, FalseVal));

1084 }

1085

1086 return nullptr;

1087}

1088

1090 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

1096

1097 if (Op0)

1099 if (Op1)

1101

1102

1103

1104 if (Op0 && Op1 && LHSOpcode == RHSOpcode)

1106 return V;

1107

1108

1109

1110 if (Op0)

1114 return V;

1115

1116

1117

1118 if (Op1)

1122 return V;

1123

1124 return nullptr;

1125}

1126

1127

1128

1129

1130

1131

1133 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

1137

1138

1140 return R;

1141

1142

1144

1145

1148

1149

1153

1154

1155 if (L && R) {

1156

1157 ++NumExpand;

1159 C->takeName(&I);

1160 return C;

1161 }

1162

1163

1165

1166 ++NumExpand;

1168 C->takeName(&I);

1169 return C;

1170 }

1171

1172

1174

1175 ++NumExpand;

1177 C->takeName(&I);

1178 return C;

1179 }

1180 }

1181

1183

1184

1187

1188

1192

1193

1194 if (L && R) {

1195

1196 ++NumExpand;

1198 A->takeName(&I);

1199 return A;

1200 }

1201

1202

1204

1205 ++NumExpand;

1207 A->takeName(&I);

1208 return A;

1209 }

1210

1211

1213

1214 ++NumExpand;

1216 A->takeName(&I);

1217 return A;

1218 }

1219 }

1220

1222}

1223

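/// The helper below recognizes two PHI nodes in the same block, with matching
/// predecessor lists, whose incoming values are the same pair (L0, R0),
/// possibly swapped per predecessor, and returns that pair.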
static std::optional<std::pair<Value *, Value *>>

  if (LHS->getParent() != RHS->getParent())
    return std::nullopt;

  if (LHS->getNumIncomingValues() < 2)
    return std::nullopt;

  if (!equal(LHS->blocks(), RHS->blocks()))
    return std::nullopt;

  Value *L0 = LHS->getIncomingValue(0);
  Value *R0 = RHS->getIncomingValue(0);

  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
    Value *L1 = LHS->getIncomingValue(I);
    Value *R1 = RHS->getIncomingValue(I);

    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
      continue;

    return std::nullopt;
  }

  return std::optional(std::pair(L0, R0));
}

std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
  Instruction *LHSInst = dyn_cast<Instruction>(LHS);
  Instruction *RHSInst = dyn_cast<Instruction>(RHS);
  if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
    return std::nullopt;
  switch (LHSInst->getOpcode()) {
  case Instruction::PHI:

  case Instruction::Select: {

      return std::pair(TrueVal, FalseVal);
    return std::nullopt;
  }
  case Instruction::Call: {
    MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
    MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
    if (LHSMinMax && RHSMinMax &&
        ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
         (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getLHS())))
      return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
    return std::nullopt;
  }
  default:
    return std::nullopt;
  }
}

1287

1294 if (!LHSIsSelect && !RHSIsSelect)

1295 return nullptr;

1296

1299 if (isa(&I)) {

1300 FMF = I.getFastMathFlags();

1302 }

1303

1306

1307 Value *Cond, *True = nullptr, *False = nullptr;

1308

1309

1310

1311

1312

1314

1315 if (Opcode != Instruction::Add || (!True && !False) || (True && False))

1316 return nullptr;

1317

1322 }

1326 }

1327 return nullptr;

1328 };

1329

1330 if (LHSIsSelect && RHSIsSelect && A == D) {

1331

1335

1337 if (False && !True)

1339 else if (True && !False)

1341 }

1342 } else if (LHSIsSelect && LHS->hasOneUse()) {

1343

1347 if (Value *NewSel = foldAddNegate(B, C, RHS))

1348 return NewSel;

1349 } else if (RHSIsSelect && RHS->hasOneUse()) {

1350

1354 if (Value *NewSel = foldAddNegate(E, F, LHS))

1355 return NewSel;

1356 }

1357

1358 if (!True || !False)

1359 return nullptr;

1360

1362 SI->takeName(&I);

1363 return SI;

1364}

1365

1366

1367

1369 assert(!isa(I) && "Shouldn't invert users of constant");

1371 if (U == IgnoredUser)

1372 continue;

1373 switch (cast(U)->getOpcode()) {

1374 case Instruction::Select: {

1375 auto *SI = cast(U);

1376 SI->swapValues();

1377 SI->swapProfMetadata();

1378 break;

1379 }

1380 case Instruction::Br: {

1381 BranchInst *BI = cast(U);

1383 if (BPI)

1385 break;

1386 }

1387 case Instruction::Xor:

1389

1391 break;

1392 default:

1394 "canFreelyInvertAllUsersOf() ?");

1395 }

1396 }

1397}

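/// Return the value that \p V negates (or a suitably negated constant) when
/// one of the cases handled below applies; otherwise return nullptr.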
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {

    return NegV;

  if (ConstantInt *C = dyn_cast<ConstantInt>(V))

  if (C->getType()->getElementType()->isIntegerTy())

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      if (!Elt)
        return nullptr;

      if (isa(Elt))
        continue;

      if (!isa(Elt))
        return nullptr;
    }

  }

  if (auto *CV = dyn_cast(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())

  return nullptr;
}


1445Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(

1446 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,

1448

1450 Type *IntTy = IntOps[0]->getType();

1451

1453

1454

1455 unsigned MaxRepresentableBits =

1457

1458

1459

1460 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};

1461

1462

1463

1464 auto IsNonZero = [&](unsigned OpNo) -> bool {

1465 if (OpsKnown[OpNo].hasKnownBits() &&

1466 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())

1467 return true;

1469 };

1470

1471 auto IsNonNeg = [&](unsigned OpNo) -> bool {

1472

1473

1474

1475 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();

1476 };

1477

1478

1479 auto IsValidPromotion = [&](unsigned OpNo) -> bool {

1480

1481 if (OpsFromSigned != isa(BO.getOperand(OpNo)) &&

1482 !IsNonNeg(OpNo))

1483 return false;

1484

1485

1486

1487

1488

1489

1490 if (MaxRepresentableBits < IntSz) {

1491

1492

1493

1494

1495 if (OpsFromSigned)

1496 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);

1497

1498

1499 else {

1500 NumUsedLeadingBits[OpNo] =

1501 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();

1502 }

1503 }

1504

1505

1506

1507

1508

1509 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])

1510 return false;

1511

1512 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||

1513 IsNonZero(OpNo);

1514 };

1515

1516

1517 if (Op1FpC != nullptr) {

1518

1519 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&

1521 return nullptr;

1522

1524 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,

1525 IntTy, DL);

1526 if (Op1IntC == nullptr)

1527 return nullptr;

1529 : Instruction::UIToFP,

1530 Op1IntC, FPTy, DL) != Op1FpC)

1531 return nullptr;

1532

1533

1534 IntOps[1] = Op1IntC;

1535 }

1536

1537

1538 if (IntTy != IntOps[1]->getType())

1539 return nullptr;

1540

1541 if (Op1FpC == nullptr) {

1542 if (!IsValidPromotion(1))

1543 return nullptr;

1544 }

1545 if (!IsValidPromotion(0))

1546 return nullptr;

1547

1548

1550

1551 bool NeedsOverflowCheck = true;

1552

1553

1554 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;

1555 unsigned OverflowMaxCurBits =

1556 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);

1557 bool OutputSigned = OpsFromSigned;

1559 case Instruction::FAdd:

1560 IntOpc = Instruction::Add;

1561 OverflowMaxOutputBits += OverflowMaxCurBits;

1562 break;

1563 case Instruction::FSub:

1564 IntOpc = Instruction::Sub;

1565 OverflowMaxOutputBits += OverflowMaxCurBits;

1566 break;

1567 case Instruction::FMul:

1568 IntOpc = Instruction::Mul;

1569 OverflowMaxOutputBits += OverflowMaxCurBits * 2;

1570 break;

1571 default:

1573 }

1574

1575 if (OverflowMaxOutputBits < IntSz) {

1576 NeedsOverflowCheck = false;

1577

1578

1579 if (IntOpc == Instruction::Sub)

1580 OutputSigned = true;

1581 }

1582

1583

1584

1585

1586 if (NeedsOverflowCheck &&

1587 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))

1588 return nullptr;

1589

1591 if (auto *IntBO = dyn_cast(IntBinOp)) {

1592 IntBO->setHasNoSignedWrap(OutputSigned);

1593 IntBO->setHasNoUnsignedWrap(!OutputSigned);

1594 }

1595 if (OutputSigned)

1596 return new SIToFPInst(IntBinOp, FPTy);

1597 return new UIToFPInst(IntBinOp, FPTy);

1598}

1599

1600

1601

1602

1603

1604

1606 std::array<Value *, 2> IntOps = {nullptr, nullptr};

1608

1609

1610

1613 return nullptr;

1614

1618 return nullptr;

1619

1620

1622

1623

1624

1625

1626 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, false,

1627 IntOps, Op1FpC, OpsKnown))

1628 return R;

1629 return foldFBinOpOfIntCastsFromSign(BO, true, IntOps,

1630 Op1FpC, OpsKnown);

1631}

1632

1633

1634

1635

1637

1638

1639

1645 X->getType()->isIntOrIntVectorTy(1))

1646 return nullptr;

1647

1648

1654}

1655

1657 bool IsTrueArm) {

1659 for (Value *Op : I.operands()) {

1660 Value *V = nullptr;

1661 if (Op == SI) {

1662 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();

1663 } else if (match(SI->getCondition(),

1668

1669 } else {

1670 V = Op;

1671 }

1673 }

1674

1676}

1677

1684 return Clone;

1685}

1686

1688 bool FoldWithMultiUse) {

1689

1690 if (!SI->hasOneUse() && !FoldWithMultiUse)

1691 return nullptr;

1692

1693 Value *TV = SI->getTrueValue();

1694 Value *FV = SI->getFalseValue();

1695

1696

1697 if (SI->getType()->isIntOrIntVectorTy(1))

1698 return nullptr;

1699

1700

1701

1702

1703

1704

1705

1706

1707 if (auto *CI = dyn_cast(SI->getCondition())) {

1708 if (CI->hasOneUse()) {

1709 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);

1710 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))

1711 return nullptr;

1712 }

1713 }

1714

1715

1719 if (!NewTV && !NewFV)

1720 return nullptr;

1721

1722

1723 if (!NewTV)

1725 if (!NewFV)

1727 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);

1728}

1729

1734

1735

1737 for (Value *Op : I.operands()) {

1738 if (Op == PN)

1740 else

1742 }

1743

1744

1745

1746

1747

1751 return NewVal;

1752

1753

1754

1756 const ICmpInst *ICmp = dyn_cast(&I);

1757 if (TerminatorBI && TerminatorBI->isConditional() &&

1762 DL, LHSIsTrue);

1763 if (ImpliedCond)

1765 }

1766

1767 return nullptr;

1768}

1769

1771 bool AllowMultipleUses) {

1773 if (NumPHIValues == 0)

1774 return nullptr;

1775

1776

1777

1778

1780 bool IdenticalUsers = false;

1781 if (!AllowMultipleUses && !OneUse) {

1782

1785 if (UI != &I && I.isIdenticalTo(UI))

1786 return nullptr;

1787 }

1788

1789 IdenticalUsers = true;

1790 }

1791

1792

1793 for (Value *Op : I.operands()) {

1794 if (Op == PN)

1795 continue;

1796

1797

1798 auto *I = dyn_cast(Op);

1799 if (I)

1800 continue;

1801

1802

1803 if (isa(I))

1804 if (I->getParent() == PN->getParent())

1805 continue;

1806

1807

1809 continue;

1810

1811

1812 return nullptr;

1813 }

1814

1815

1816

1819 bool SeenNonSimplifiedInVal = false;

1820 for (unsigned i = 0; i != NumPHIValues; ++i) {

1823

1826 continue;

1827 }

1828

1829

1830

1831 auto WillFold = [&]() {

1833 return false;

1834

1835

1836 const APInt *Ignored;

1837 if (isa(InVal) &&

1839 return true;

1840

1841

1842 if (isa(InVal) &&

1843 cast(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&

1846 return true;

1847

1848 return false;

1849 };

1850

1851 if (WillFold()) {

1852 OpsToMoveUseToIncomingBB.push_back(i);

1853 NewPhiValues.push_back(nullptr);

1854 continue;

1855 }

1856

1857 if (!OneUse && !IdenticalUsers)

1858 return nullptr;

1859

1860 if (SeenNonSimplifiedInVal)

1861 return nullptr;

1862 SeenNonSimplifiedInVal = true;

1863

1864

1865

1866

1867

1868

1871 return nullptr;

1872

1873 NewPhiValues.push_back(nullptr);

1874 OpsToMoveUseToIncomingBB.push_back(i);

1875

1876

1877

1878 if (isa(InVal))

1879 if (cast(InVal)->getParent() == InBB)

1880 return nullptr;

1881

1882

1883

1884

1886 return nullptr;

1887 }

1888

1889

1890

1892 for (auto OpIndex : OpsToMoveUseToIncomingBB) {

1895

1897 if (!Clone) {

1898 Clone = I.clone();

1900 if (U == PN)

1901 U = Op;

1902 else

1903 U = U->DoPHITranslation(PN->getParent(), OpBB);

1904 }

1906 Clones.insert({OpBB, Clone});

1907 }

1908

1909 NewPhiValues[OpIndex] = Clone;

1910 }

1911

1912

1917

1918 for (unsigned i = 0; i != NumPHIValues; ++i)

1920

1921 if (IdenticalUsers) {

1925 continue;

1928 }

1929 OneUse = true;

1930 }

1931

1932 if (OneUse) {

1934 const_cast<PHINode &>(*NewPN),

1935 const_cast<PHINode &>(*PN), DT);

1936 }

1938}

1939

1941

1942

1943

1944 auto *Phi0 = dyn_cast(BO.getOperand(0));

1945 auto *Phi1 = dyn_cast(BO.getOperand(1));

1946 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||

1947 Phi0->getNumOperands() != Phi1->getNumOperands())

1948 return nullptr;

1949

1950

1951 if (BO.getParent() != Phi0->getParent() ||

1952 BO.getParent() != Phi1->getParent())

1953 return nullptr;

1954

1955

1956

1957

1958

1959

1960

1961

1962

1963

1965 false);

1966 if (C) {

1968 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {

1969 auto &Phi0Use = std::get<0>(T);

1970 auto &Phi1Use = std::get<1>(T);

1971 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))

1972 return false;

1973 Value *Phi0UseV = Phi0Use.get();

1974 Value *Phi1UseV = Phi1Use.get();

1975 if (Phi0UseV == C)

1976 NewIncomingValues.push_back(Phi1UseV);

1977 else if (Phi1UseV == C)

1978 NewIncomingValues.push_back(Phi0UseV);

1979 else

1980 return false;

1981 return true;

1982 };

1983

1984 if (all_of(zip(Phi0->operands(), Phi1->operands()),

1985 CanFoldIncomingValuePair)) {

1987 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());

1988 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&

1989 "The number of collected incoming values should equal the number "

1990 "of the original PHINode operands!");

1991 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)

1992 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));

1993 return NewPhi;

1994 }

1995 }

1996

1997 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)

1998 return nullptr;

1999

2000

2004 ConstBB = Phi0->getIncomingBlock(0);

2005 OtherBB = Phi0->getIncomingBlock(1);

2007 ConstBB = Phi0->getIncomingBlock(1);

2008 OtherBB = Phi0->getIncomingBlock(0);

2009 } else {

2010 return nullptr;

2011 }

2012 if (match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))

2013 return nullptr;

2014

2015

2016

2017

2018 auto *PredBlockBranch = dyn_cast(OtherBB->getTerminator());

2019 if (!PredBlockBranch || PredBlockBranch->isConditional() ||

2021 return nullptr;

2022

2023

2024

2025

2026 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)

2028 return nullptr;

2029

2030

2032 if (!NewC)

2033 return nullptr;

2034

2035

2036

2039 Phi0->getIncomingValueForBlock(OtherBB),

2040 Phi1->getIncomingValueForBlock(OtherBB));

2041 if (auto *NotFoldedNewBO = dyn_cast(NewBO))

2042 NotFoldedNewBO->copyIRFlags(&BO);

2043

2044

2048 return NewPhi;

2049}

2050

2052 if (!isa(I.getOperand(1)))

2053 return nullptr;

2054

2055 if (auto *Sel = dyn_cast(I.getOperand(0))) {

2057 return NewSel;

2058 } else if (auto *PN = dyn_cast(I.getOperand(0))) {

2060 return NewPhi;

2061 }

2062 return nullptr;

2063}

2064

2066

2067

2068

2069 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&

2070 !Src.hasOneUse())

2071 return false;

2072 return true;

2073}

2074

2076 if (!isa(Inst.getType()))

2077 return nullptr;

2078

2081 assert(cast(LHS->getType())->getElementCount() ==

2082 cast(Inst.getType())->getElementCount());

2083 assert(cast(RHS->getType())->getElementCount() ==

2084 cast(Inst.getType())->getElementCount());

2085

2086

2087

2088

2089 Value *L0, *L1, *R0, *R1;

2094 cast(LHS)->isConcat() &&

2095 cast(RHS)->isConcat()) {

2096

2097

2098

2099

2100

2102 if (auto *BO = dyn_cast(NewBO0))

2105 if (auto *BO = dyn_cast(NewBO1))

2108 }

2109

2110 auto createBinOpReverse = [&](Value *X, Value *Y) {

2112 if (auto *BO = dyn_cast(V))

2116 M, Intrinsic::vector_reverse, V->getType());

2118 };

2119

2120

2121

2122

2125

2129 return createBinOpReverse(V1, V2);

2130

2131

2133 return createBinOpReverse(V1, RHS);

2134 }

2135

2137 return createBinOpReverse(LHS, V2);

2138

2139

2140

2141

2143 return nullptr;

2144

2147 if (auto *BO = dyn_cast(XY))

2150 };

2151

2152

2153

2156 V1->getType() == V2->getType() &&

2158

2159 return createBinOpShuffle(V1, V2, Mask);

2160 }

2161

2162

2163

2168 auto *LShuf = cast(LHS);

2169 auto *RShuf = cast(RHS);

2170

2171

2172

2173

2174 if (LShuf->isSelect() &&

2176 RShuf->isSelect() &&

2178

2179

2180

2181

2184 return NewBO;

2185 }

2186 }

2187

2188

2189

2190

2191

2192

2194 auto *InstVTy = dyn_cast(Inst.getType());

2195 if (InstVTy &&

2199 cast(V1->getType())->getNumElements() <=

2200 InstVTy->getNumElements()) {

2202 "Shuffle should not change scalar type");

2203

2204

2205

2206

2207

2208

2209 bool ConstOp1 = isa(RHS);

2211 unsigned SrcVecNumElts =

2212 cast(V1->getType())->getNumElements();

2215 bool MayChange = true;

2216 unsigned NumElts = InstVTy->getNumElements();

2217 for (unsigned I = 0; I < NumElts; ++I) {

2218 Constant *CElt = C->getAggregateElement(I);

2219 if (ShMask[I] >= 0) {

2220 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");

2221 Constant *NewCElt = NewVecC[ShMask[I]];

2222

2223

2224

2225

2226

2227

2228 if (!CElt || (!isa(NewCElt) && NewCElt != CElt) ||

2229 I >= SrcVecNumElts) {

2230 MayChange = false;

2231 break;

2232 }

2233 NewVecC[ShMask[I]] = CElt;

2234 }

2235

2236

2237

2238

2239

2240

2241

2242

2243

2244 if (I >= SrcVecNumElts || ShMask[I] < 0) {

2246 ConstOp1

2249 if (!MaybePoison || !isa(MaybePoison)) {

2250 MayChange = false;

2251 break;

2252 }

2253 }

2254 }

2255 if (MayChange) {

2257

2258

2259

2260

2263

2264

2265

2266 Value *NewLHS = ConstOp1 ? V1 : NewC;

2267 Value *NewRHS = ConstOp1 ? NewC : V1;

2268 return createBinOpShuffle(NewLHS, NewRHS, Mask);

2269 }

2270 }

2271

2272

2274

2275 if (isa(RHS))

2277

2280 int SplatIndex;

2285 X->getType() != Inst.getType() ||

2287 return nullptr;

2288

2289

2290

2291

2295 return nullptr;

2296 }

2297

2298

2299

2300

2305

2306

2307

2308 if (isa(R)) {

2309 R->copyFastMathFlags(&Inst);

2310 R->andIRFlags(RHS);

2311 }

2312 if (auto *NewInstBO = dyn_cast(NewBO))

2313 NewInstBO->copyIRFlags(R);

2314 return R;

2315 }

2316

2317 return nullptr;

2318}

2319

2320

2321

2322

2324

2326

2327

2328

2329 if (BO.getOpcode() == Instruction::Sub)

2331

2335 return nullptr;

2336

2337

2338

2339 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;

2342 cast(Op1)->getOpcode() == CastOpc &&

2343 (Op0->hasOneUse() || Op1->hasOneUse()))) {

2344

2345

2348 return nullptr;

2350 if (!NarrowC)

2351 return nullptr;

2352 Y = NarrowC;

2353 }

2354

2355

2356 if (BO.getOpcode() == Instruction::Sub)

2358

2359

2360

2361 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))

2362 return nullptr;

2363

2364

2365

2367 if (auto *NewBinOp = dyn_cast(NarrowBO)) {

2368 if (IsSext)

2369 NewBinOp->setHasNoSignedWrap();

2370 else

2371 NewBinOp->setHasNoUnsignedWrap();

2372 }

2374}

2375

2376

2377

2381}

2382

2383

2384

2387 if (GEP.hasAllConstantIndices())

2388 return nullptr;

2389

2396 return nullptr;

2397

2398

2399

2400

2403 Type *Ty = GEP.getSourceElementType();

2404 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);

2405 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);

2407}

2408

2409

2410

2411

2415 if (GEP.getNumIndices() != 1)

2416 return nullptr;

2419 const APInt *C1;

2421 return nullptr;

2422 Value *VarIndex;

2423 const APInt *C2;

2424 Type *PtrTy = Src->getType()->getScalarType();

2425 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);

2427 return nullptr;

2428 if (C1->getBitWidth() != IndexSizeInBits ||

2430 return nullptr;

2432 if (isa(BaseType))

2433 return nullptr;

2436 if (NewOffset.isZero() ||

2437 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {

2438 Value *GEPConst =

2441 }

2442

2443 return nullptr;

2444}

2445

2448

2449

2450

2452 return nullptr;

2453

2455 return I;

2456

2457

2458 Type *PtrTy = Src->getType()->getScalarType();

2459 if (GEP.hasAllConstantIndices() &&

2460 (Src->hasOneUse() || Src->hasAllConstantIndices())) {

2461

2464 bool IsFirstType = true;

2465 unsigned NumVarIndices = 0;

2466 for (auto Pair : enumerate(Src->indices())) {

2467 if (!isa(Pair.value())) {

2469 IsFirstType = false;

2470 NumVarIndices = Pair.index() + 1;

2471 }

2472 ++GTI;

2473 }

2474

2475

2477 if (NumVarIndices != Src->getNumIndices()) {

2478

2479 if (BaseType->isScalableTy())

2480 return nullptr;

2481

2483 if (!IsFirstType)

2488 }

2489

2490

2491 if (GEP.accumulateConstantOffset(DL, Offset))

2492 return nullptr;

2493

2494

2497 if (Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))

2498 return nullptr;

2499

2503 Src->getNumIndices() - NumVarIndices));

2505 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));

2506

2507

2508

2509

2510 if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())

2512 if (Idx.isNonNegative())

2514 }

2515

2518 Indices, "", NW));

2519 }

2520

2521 if (Src->getResultElementType() != GEP.getSourceElementType())

2522 return nullptr;

2523

2525

2526

2527 bool EndsWithSequential = false;

2529 I != E; ++I)

2530 EndsWithSequential = I.isSequential();

2531

2532

2533 if (EndsWithSequential) {

2534

2535

2536 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);

2537 Value *GO1 = GEP.getOperand(1);

2538

2539

2540

2541

2542

2544 return nullptr;

2545

2548

2549

2550 if (Sum == nullptr)

2551 return nullptr;

2552

2553 Indices.append(Src->op_begin()+1, Src->op_end()-1);

2555 Indices.append(GEP.op_begin()+2, GEP.op_end());

2556 } else if (isa(*GEP.idx_begin()) &&

2557 cast(*GEP.idx_begin())->isNullValue() &&

2558 Src->getNumOperands() != 1) {

2559

2560 Indices.append(Src->op_begin()+1, Src->op_end());

2561 Indices.append(GEP.idx_begin()+1, GEP.idx_end());

2562 }

2563

2564 if (!Indices.empty())

2567 Src->getSourceElementType(), Src->getOperand(0), Indices, "",

2569

2570 return nullptr;

2571}

2572

2575 bool &DoesConsume, unsigned Depth) {

2576 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));

2577

2580 DoesConsume = true;

2581 return A;

2582 }

2583

2585

2588

2590 return nullptr;

2591

2592

2593

2594 if (!WillInvertAllUses)

2595 return nullptr;

2596

2597

2598

2599 if (auto *I = dyn_cast(V)) {

2601 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),

2602 I->getOperand(1));

2603 return NonNull;

2604 }

2605

2606

2607

2610 DoesConsume, Depth))

2613 DoesConsume, Depth))

2615 return nullptr;

2616 }

2617

2618

2619

2622 DoesConsume, Depth))

2625 DoesConsume, Depth))

2627 return nullptr;

2628 }

2629

2630

2631

2634 DoesConsume, Depth))

2636 return nullptr;

2637 }

2638

2639

2640

2643 DoesConsume, Depth))

2645 return nullptr;

2646 }

2647

2649

2650

2653

2655 bool LocalDoesConsume = DoesConsume;

2657 LocalDoesConsume, Depth))

2658 return nullptr;

2660 LocalDoesConsume, Depth)) {

2661 DoesConsume = LocalDoesConsume;

2662 if (Builder != nullptr) {

2664 DoesConsume, Depth);

2665 assert(NotB != nullptr &&

2666 "Unable to build inverted value for known freely invertable op");

2667 if (auto *II = dyn_cast(V))

2671 }

2672 return NonNull;

2673 }

2674 }

2675

2676 if (PHINode *PN = dyn_cast(V)) {

2677 bool LocalDoesConsume = DoesConsume;

2679 for (Use &U : PN->operands()) {

2680 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);

2682 U.get(), false,

2684 if (NewIncomingVal == nullptr)

2685 return nullptr;

2686

2687 if (NewIncomingVal == V)

2688 return nullptr;

2690 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);

2691 }

2692

2693 DoesConsume = LocalDoesConsume;

2694 if (Builder != nullptr) {

2698 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());

2699 for (auto [Val, Pred] : IncomingValues)

2701 return NewPN;

2702 }

2703 return NonNull;

2704 }

2705

2708 DoesConsume, Depth))

2710 return nullptr;

2711 }

2712

2715 DoesConsume, Depth))

2717 return nullptr;

2718 }

2719

2720

2721

2722

2724 bool IsLogical, Value *A,

2726 bool LocalDoesConsume = DoesConsume;

2728 LocalDoesConsume, Depth))

2729 return nullptr;

2731 LocalDoesConsume, Depth)) {

2733 LocalDoesConsume, Depth);

2734 DoesConsume = LocalDoesConsume;

2735 if (IsLogical)

2738 }

2739

2740 return nullptr;

2741 };

2742

2744 return TryInvertAndOrUsingDeMorgan(Instruction::And, false, A,

2745 B);

2746

2748 return TryInvertAndOrUsingDeMorgan(Instruction::Or, false, A,

2749 B);

2750

2752 return TryInvertAndOrUsingDeMorgan(Instruction::And, true, A,

2753 B);

2754

2756 return TryInvertAndOrUsingDeMorgan(Instruction::Or, true, A,

2757 B);

2758

2759 return nullptr;

2760}

2761

2762

2764 Value *PtrOp = GEP.getOperand(0);

2765 Type *GEPEltType = GEP.getSourceElementType();

2767 return false;

2768

2769

2770

2772 return true;

2773

2774

2775

2776 if (GEP.getNumIndices() == 1 &&

2780 return true;

2781

2782

2783

2784 auto PtrOpGep = dyn_cast(PtrOp);

2785 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&

2787 const APInt *C;

2788 return match(V, m_APInt(C)) && !C->isZero();

2789 });

2790}

2791

2794 auto *Op1 = dyn_cast(PN->getOperand(0));

2795 if (!Op1)

2796 return nullptr;

2797

2798

2799

2800

2801

2802

2803

2804 if (Op1 == &GEP)

2805 return nullptr;

2807

2808 int DI = -1;

2809

2811 auto *Op2 = dyn_cast(*I);

2812 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||

2813 Op1->getSourceElementType() != Op2->getSourceElementType())

2814 return nullptr;

2815

2816

2817 if (Op2 == &GEP)

2818 return nullptr;

2819

2820

2821 Type *CurTy = nullptr;

2822

2823 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {

2824 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())

2825 return nullptr;

2826

2827 if (Op1->getOperand(J) != Op2->getOperand(J)) {

2828 if (DI == -1) {

2829

2830

2831

2832

2833

2834

2835 if (J > 1) {

2836 assert(CurTy && "No current type?");

2838 return nullptr;

2839 }

2840

2841 DI = J;

2842 } else {

2843

2844

2845

2846

2847

2848

2849 return nullptr;

2850 }

2851 }

2852

2853

2854 if (J > 0) {

2855 if (J == 1) {

2856 CurTy = Op1->getSourceElementType();

2857 } else {

2858 CurTy =

2860 }

2861 }

2862 }

2863

2864 NW &= Op2->getNoWrapFlags();

2865 }

2866

2867

2868

2869

2870 if (DI != -1 && !PN->hasOneUse())

2871 return nullptr;

2872

2873 auto *NewGEP = cast(Op1->clone());

2874 NewGEP->setNoWrapFlags(NW);

2875

2876 if (DI == -1) {

2877

2878

2879 } else {

2880

2881

2882

2884 {

2887 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),

2889 }

2890

2892 NewPN->addIncoming(cast(I)->getOperand(DI),

2894

2895 NewGEP->setOperand(DI, NewPN);

2896 }

2897

2898 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());

2899 return NewGEP;

2900}

2901

2903 Value *PtrOp = GEP.getOperand(0);

2905 Type *GEPType = GEP.getType();

2906 Type *GEPEltType = GEP.getSourceElementType();

2911

2912

2913

2914

2915 if (auto *GEPFVTy = dyn_cast(GEPType)) {

2916 auto VWidth = GEPFVTy->getNumElements();

2917 APInt PoisonElts(VWidth, 0);

2920 PoisonElts)) {

2921 if (V != &GEP)

2923 return &GEP;

2924 }

2925

2926

2927

2928

2929 }

2930

2931

2932

2933 bool MadeChange = false;

2934

2935

2936

2937 Type *NewScalarIndexTy =

2939

2942 ++I, ++GTI) {

2943

2945 continue;

2946

2947 Type *IndexTy = (*I)->getType();

2948 Type *NewIndexType =

2951 cast(IndexTy)->getElementCount())

2952 : NewScalarIndexTy;

2953

2954

2955

2958 if (!isa(*I) || match(I->get(), m_Zero())) {

2960 MadeChange = true;

2961 }

2962

2963 if (IndexTy != NewIndexType) {

2964

2965

2966

2968 MadeChange = true;

2969 }

2970 }

2971 if (MadeChange)

2972 return &GEP;

2973

2974

2975 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {

2977 if (GEP.accumulateConstantOffset(DL, Offset))

2980 GEP.getNoWrapFlags()));

2981 }

2982

2984 Value *Offset = EmitGEPOffset(cast(&GEP));

2988 }

2989

2990

2991 if (auto *PN = dyn_cast(PtrOp)) {

2994 }

2995

2996 if (auto *Src = dyn_cast(PtrOp))

2998 return I;

2999

3000 if (GEP.getNumIndices() == 1) {

3001 unsigned AS = GEP.getPointerAddressSpace();

3002 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==

3005

3006 if (TyAllocSize == 1) {

3007

3008

3009

3010

3011 Value *X = GEP.getPointerOperand();

3013 if (match(GEP.getOperand(1),

3015 GEPType == Y->getType()) {

3016 bool HasSameUnderlyingObject =

3018 bool Changed = false;

3019 GEP.replaceUsesWithIf(Y, [&](Use &U) {

3020 bool ShouldReplace = HasSameUnderlyingObject ||

3021 isa(U.getUser()) ||

3022 isa(U.getUser());

3023 Changed |= ShouldReplace;

3024 return ShouldReplace;

3025 });

3026 return Changed ? &GEP : nullptr;

3027 }

3028 } else if (auto *ExactIns =

3029 dyn_cast(GEP.getOperand(1))) {

3030

3032 if (ExactIns->isExact()) {

3040 GEP.getPointerOperand(), V,

3041 GEP.getNoWrapFlags());

3042 }

3043 }

3044 if (ExactIns->isExact() && ExactIns->hasOneUse()) {

3045

3046

3047

3048

3050 std::optional NewC;

3059 if (Rem == 0)

3060 NewC = Quot;

3063 int64_t Rem;

3065

3066 if (!Quot.isAllOnes() && Rem == 0)

3067 NewC = Quot;

3068 }

3069

3070 if (NewC.has_value()) {

3073 ConstantInt::get(V->getType(), *NewC));

3074 cast(NewOp)->setIsExact();

3076 GEP.getPointerOperand(), NewOp,

3077 GEP.getNoWrapFlags());

3078 }

3079 }

3080 }

3081 }

3082 }

3083

3085 return nullptr;

3086

3087 if (GEP.getNumIndices() == 1) {

3088

3089

3090 auto CanPreserveInBounds = [&](bool AddIsNSW, Value *Idx1, Value *Idx2) {

3094 };

3095

3096

3097 Value *Idx1, *Idx2;

3098 if (match(GEP.getOperand(1),

3100

3101

3102

3103

3104

3105 bool IsInBounds = CanPreserveInBounds(

3106 cast(GEP.getOperand(1))->hasNoSignedWrap(),

3107 Idx1, Idx2);

3108 auto *NewPtr =

3110 Idx1, "", IsInBounds);

3113 IsInBounds));

3114 }

3118

3119

3120

3121

3122

3123

3124 bool IsInBounds = CanPreserveInBounds(

3125 true, Idx1, C);

3127 GEP.getSourceElementType(), GEP.getPointerOperand(),

3129 IsInBounds);

3134 "", IsInBounds));

3135 }

3136 }

3137

3138 if (GEP.isInBounds()) {

3139 unsigned IdxWidth =

3141 APInt BasePtrOffset(IdxWidth, 0);

3142 Value *UnderlyingPtrOp =

3144 BasePtrOffset);

3145 bool CanBeNull, CanBeFreed;

3147 DL, CanBeNull, CanBeFreed);

3148 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {

3149 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&

3151 APInt AllocSize(IdxWidth, DerefBytes);

3152 if (BasePtrOffset.ule(AllocSize)) {

3154 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());

3155 }

3156 }

3157 }

3158 }

3159

3160

3161 if (GEP.hasNoUnsignedSignedWrap() && GEP.hasNoUnsignedWrap() &&

3163 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));

3164 })) {

3166 return &GEP;

3167 }

3168

3170 return R;

3171

3172 return nullptr;

3173}

3174

3177 if (isa(V))

3178 return true;

3179 if (auto *LI = dyn_cast(V))

3180 return isa(LI->getPointerOperand());

3181

3183}

3184

3185

3186

3190

3191 return false;

3192

3194

3195 return false;

3196

3198 return false;

3199

3200

3201

3202

3204 return Dest && Dest->Ptr == UsedV;

3205}

3206

3213

3214 do {

3218 switch (I->getOpcode()) {

3219 default:

3220

3221 return false;

3222

3223 case Instruction::AddrSpaceCast:

3224 case Instruction::BitCast:

3225 case Instruction::GetElementPtr:

3226 Users.emplace_back(I);

3228 continue;

3229

3230 case Instruction::ICmp: {

3231 ICmpInst *ICI = cast(I);

3232

3233

3234

3236 return false;

3237 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;

3239 return false;

3240

3241

3242

3243

3244 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {

3245

3246

3247

3248 const APInt *Alignment;

3250 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&

3252 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();

3253 };

3254 auto *CB = dyn_cast(AI);

3256 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&

3257 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&

3258 !AlignmentAndSizeKnownValid(CB))

3259 return false;

3260 Users.emplace_back(I);

3261 continue;

3262 }

3263

3264 case Instruction::Call:

3265

3267 switch (II->getIntrinsicID()) {

3268 default:

3269 return false;

3270

3271 case Intrinsic::memmove:

3272 case Intrinsic::memcpy:

3273 case Intrinsic::memset: {

3275 if (MI->isVolatile() || MI->getRawDest() != PI)

3276 return false;

3277 [[fallthrough]];

3278 }

3279 case Intrinsic::assume:

3280 case Intrinsic::invariant_start:

3281 case Intrinsic::invariant_end:

3282 case Intrinsic::lifetime_start:

3283 case Intrinsic::lifetime_end:

3284 case Intrinsic::objectsize:

3285 Users.emplace_back(I);

3286 continue;

3287 case Intrinsic::launder_invariant_group:

3288 case Intrinsic::strip_invariant_group:

3289 Users.emplace_back(I);

3291 continue;

3292 }

3293 }

3294

3296 Users.emplace_back(I);

3297 continue;

3298 }

3299

3303 Users.emplace_back(I);

3304 continue;

3305 }

3306

3310 Users.emplace_back(I);

3312 continue;

3313 }

3314

3315 return false;

3316

3317 case Instruction::Store: {

3319 if (SI->isVolatile() || SI->getPointerOperand() != PI)

3320 return false;

3321 Users.emplace_back(I);

3322 continue;

3323 }

3324 }

3326 }

3327 } while (!Worklist.empty());

3328 return true;

3329}

3330

3333

3334

3335

3336

3337

3338

3339

3340

3341

3342

3343

3345

3346

3347

3350 std::unique_ptr DIB;

3351 if (isa(MI)) {

3353 DIB.reset(new DIBuilder(*MI.getModule(), false));

3354 }

3355

3357 for (unsigned i = 0, e = Users.size(); i != e; ++i) {

3358

3359

3361 continue;

3362

3364

3366 if (II->getIntrinsicID() == Intrinsic::objectsize) {

3369 II, DL, &TLI, AA, true, &InsertedInstructions);

3370 for (Instruction *Inserted : InsertedInstructions)

3374 Users[i] = nullptr;

3375 }

3376 }

3377 }

3378 for (unsigned i = 0, e = Users.size(); i != e; ++i) {

3380 continue;

3381

3383

3384 if (ICmpInst *C = dyn_cast(I)) {

3387 C->isFalseWhenEqual()));

3388 } else if (auto *SI = dyn_cast(I)) {

3389 for (auto *DVI : DVIs)

3390 if (DVI->isAddressOfVariable())

3392 for (auto *DVR : DVRs)

3393 if (DVR->isAddressOfVariable())

3395 } else {

3396

3397

3399 }

3401 }

3402

3404

3405 Module *M = II->getModule();

3408 II->getParent());

3409 }


3436 for (auto *DVI : DVIs)

3437 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())

3438 DVI->eraseFromParent();

3439 for (auto *DVR : DVRs)

3440 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())

3441 DVR->eraseFromParent();

3442

3444 }

3445 return nullptr;

3446}


3473 if (!PredBB)

3474 return nullptr;

3475

3476

3477

3481 return nullptr;

3482

3483

3484

3485

3486

3487 if (FreeInstrBB->size() != 2) {

3489 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)

3490 continue;

3491 auto *Cast = dyn_cast(&Inst);

3492 if (!Cast || !Cast->isNoopCast(DL))

3493 return nullptr;

3494 }

3495 }

3496

3504 TrueBB, FalseBB)))

3505 return nullptr;

3507 return nullptr;

3508

3509

3511 return nullptr;

3513 "Broken CFG: missing edge from predecessor to successor");

3514

3515

3516

3518 if (&Instr == FreeInstrBBTerminator)

3519 break;

3520 Instr.moveBeforePreserving(TI);

3521 }

3523 "Only the branch instruction should remain");

3524

3525

3526

3527

3528

3529

3530

3531

3532

3534 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);

3535 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);

3536 if (Dereferenceable.isValid()) {

3538 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,

3539 Attribute::Dereferenceable);

3540 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);

3541 }

3543

3544 return &FI;

3545}

3546

3548

3549 if (isa(Op)) {

3550

3553 }

3554

3555

3556

3557 if (isa(Op))

3559

3560

3561

3562 CallInst *CI = dyn_cast(Op);

3566

3567

3568

3569

3570

3571

3572

3573

3574

3575

3576

3581 return I;

3582 }

3583

3584 return nullptr;

3585}

3586

3590 return nullptr;

3591

3593 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();

3594 if (ReturnClass == fcNone)

3595 return nullptr;

3596

3598 Value *Simplified =

3600 if (!Simplified)

3601 return nullptr;

3602

3604}

3605

3606

3608

3609

3610

3611 bool Changed = false;

3612 while (Instruction *Prev = I.getPrevNonDebugInstruction()) {

3613

3614

3615

3616

3617 if (Prev->isEHPad())

3618 break;

3619

3621 break;

3622

3623

3624

3625

3626

3629 Changed = true;

3630 }

3631 return Changed;

3632}

3633

3636 return nullptr;

3637}

3638

3641

3642

3643

3644

3645

3648 return BBI->isDebugOrPseudoInst() ||

3649 (isa(BBI) && BBI->getType()->isPointerTy());

3650 };

3651

3653 do {

3654 if (BBI != FirstInstr)

3655 --BBI;

3656 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));

3657

3658 return dyn_cast(BBI);

3659 };

3660

3663 return &BI;

3664

3665 return nullptr;

3666}

3667

3670 if (DeadEdges.insert({From, To}).second)

3671 return;

3672

3673

3675 for (Use &U : PN.incoming_values())

3676 if (PN.getIncomingBlock(U) == From && !isa(U)) {

3680 }

3681

3683}

3684

3685

3686

3692 std::next(I->getReverseIterator())))) {

3693 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {

3696 }

3697 if (Inst.isEHPad() || Inst.getType()->isTokenTy())

3698 continue;

3699

3700 Inst.dropDbgRecords();

3703 }

3704

3708 for (Value *V : Changed)

3710 }

3711

3712

3715}

3716

3723 }))

3724 continue;

3725

3727 }

3728}

3729

3734

3735 if (Succ == LiveSucc)

3736 continue;

3737

3739 }

3740

3742}

3743

3747

3748

3752

3754 if (BPI)

3757 }

3758

3759

3760

3761

3763 if (isa(Cond) &&

3769 if (BPI)

3772 }

3773

3774

3775

3778

3779

3783

3784 auto *Cmp = cast(Cond);

3787 if (BPI)

3790 return &BI;

3791 }

3792

3793 if (isa(Cond)) {

3795 return nullptr;

3796 }

3797 if (auto *CI = dyn_cast(Cond)) {

3800 return nullptr;

3801 }

3802

3803

3804

3805

3812 continue;

3813 }

3818 }

3819 }

3820 }

3821

3823 return nullptr;

3824}

3825

3826

3827

3828

3831 bool IsTrueArm) {

3832 unsigned CstOpIdx = IsTrueArm ? 1 : 2;

3833 auto *C = dyn_cast(Select->getOperand(CstOpIdx));

3834 if (C)

3835 return nullptr;

3836

3837 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();

3838 if (CstBB != SI.getDefaultDest())

3839 return nullptr;

3840 Value *X = Select->getOperand(3 - CstOpIdx);

3842 const APInt *RHSC;

3845 return nullptr;

3846 if (IsTrueArm)

3848

3849

3851 for (auto Case : SI.cases())

3852 if (!CR.contains(Case.getCaseValue()->getValue()))

3853 return nullptr;

3854

3855 return X;

3856}

3857

3859 Value *Cond = SI.getCondition();

3863

3864 for (auto Case : SI.cases()) {

3866 assert(isa(NewCase) &&

3867 "Result of expression should be constant");

3868 Case.setValue(cast(NewCase));

3869 }

3871 }

3872

3875

3876 for (auto Case : SI.cases()) {

3878 assert(isa(NewCase) &&

3879 "Result of expression should be constant");

3880 Case.setValue(cast(NewCase));

3881 }

3883 }

3884

3888 all_of(SI.cases(), [&](const auto &Case) {

3889 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;

3890 })) {

3891

3895 Value *NewCond = Op0;

3897

3901 }

3902 for (auto Case : SI.cases()) {

3903 const APInt &CaseVal = Case.getCaseValue()->getValue();

3905 : CaseVal.lshr(ShiftAmt);

3906 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));

3907 }

3909 }

3910 }

3911

3912

3914 bool IsZExt = isa(Cond);

3917

3918 if (all_of(SI.cases(), [&](const auto &Case) {

3919 const APInt &CaseVal = Case.getCaseValue()->getValue();

3920 return IsZExt ? CaseVal.isIntN(NewWidth)

3921 : CaseVal.isSignedIntN(NewWidth);

3922 })) {

3923 for (auto &Case : SI.cases()) {

3924 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);

3925 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));

3926 }

3928 }

3929 }

3930

3931

3932 if (auto *Select = dyn_cast(Cond)) {

3939 }

3940

3944

3945

3946

3947 for (const auto &C : SI.cases()) {

3948 LeadingKnownZeros =

3949 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());

3950 LeadingKnownOnes =

3951 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());

3952 }

3953

3954 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);

3955

3956

3957

3958

3959

3960 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&

3961 shouldChangeType(Known.getBitWidth(), NewWidth)) {

3965

3966 for (auto Case : SI.cases()) {

3967 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);

3968 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));

3969 }

3971 }

3972

3973 if (isa(Cond)) {

3975 return nullptr;

3976 }

3977 if (auto *CI = dyn_cast(Cond)) {

3979 SI.findCaseValue(CI)->getCaseSuccessor());

3980 return nullptr;

3981 }

3982

3983 return nullptr;

3984}

3985

3987InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {

3989 if (!WO)

3990 return nullptr;

3991

3993 const APInt *C = nullptr;

3995 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||

3996 OvID == Intrinsic::umul_with_overflow)) {

3997

3998 if (C->isAllOnes())

4000

4001 if (C->isPowerOf2()) {

4002 return BinaryOperator::CreateShl(

4003 WO->getLHS(),

4004 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));

4005 }

4006 }

4007 }

4008

4009

4010

4011

4012 if (!WO->hasOneUse())

4013 return nullptr;

4014

4015

4016

4019 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();

4020

4024 }

4025

4026 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");

4027

4028

4029 if (OvID == Intrinsic::usub_with_overflow)

4031

4032

4033

4034 if (OvID == Intrinsic::smul_with_overflow &&

4035 WO->getLHS()->getType()->isIntOrIntVectorTy(1))

4036 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());

4037

4038

4039 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {

4040 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();

4041

4045 ConstantInt::get(WO->getLHS()->getType(),

4047 }

4048

4049

4050

4051

4052 if (C) {

4053

4054

4056 WO->getBinaryOp(), *C, WO->getNoWrapKind());

4057

4061 auto *OpTy = WO->getRHS()->getType();

4062 auto *NewLHS = WO->getLHS();

4066 ConstantInt::get(OpTy, NewRHSC));

4067 }

4068

4069 return nullptr;

4070}
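foldExtractOfOverflowIntrinsic above rewrites extracts of *.with.overflow calls into plain arithmetic, e.g. a multiply by a power of two becomes a shift of the value result, and for i8 the overflow bit of umul.with.overflow(X, X) is equivalent to X u> 15. A brute-force check of the i8 cases in plain C++ (illustrative only, not LLVM code):

#include <cassert>

int main() {
  // extractvalue (umul.with.overflow i8 X, X), 1  <==>  X u> 0x0F
  for (unsigned X = 0; X < 256; ++X) {
    bool Overflows = (X * X) > 0xFFu; // the 8-bit unsigned multiply wraps
    bool Predicted = X > 0x0Fu;       // the fold's replacement predicate
    assert(Overflows == Predicted);
  }
  // Multiply by a power of two: X * 8 == X << 3 (mod 2^8), matching the
  // shl rewrite of the value result.
  for (unsigned X = 0; X < 256; ++X)
    assert(((X * 8u) & 0xFFu) == ((X << 3u) & 0xFFu));
  return 0;
}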

4071

4074

4077

4081

4083

4084 const unsigned *exti, *exte, *insi, *inse;

4085 for (exti = EV.idx_begin(), insi = IV->idx_begin(),

4086 exte = EV.idx_end(), inse = IV->idx_end();

4087 exti != exte && insi != inse;

4088 ++exti, ++insi) {

4089 if (*insi != *exti)

4090

4091

4092

4093

4094

4095

4096

4097

4100 }

4101 if (exti == exte && insi == inse)

4102

4103

4104

4105

4107 if (exti == exte) {

4108

4109

4110

4111

4112

4113

4114

4115

4120 }

4121 if (insi == inse)

4122

4123

4124

4125

4126

4127

4128

4129

4132 }

4133

4134 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))

4135 return R;

4136

4137 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {

4138

4139 if (auto *STy = dyn_cast<StructType>(Agg->getType());

4140 STy && STy->isScalableTy())

4141 return nullptr;

4142

4143

4144

4145

4146

4147

4148 if (L->isSimple() && L->hasOneUse()) {

4149

4151

4155

4156

4157

4160 L->getPointerOperand(), Indices);

4162

4163

4165

4166

4168 }

4169 }

4170

4171 if (auto *PN = dyn_cast<PHINode>(Agg))

4173 return Res;

4174

4175

4176

4177 if (auto *SI = dyn_cast<SelectInst>(Agg))

4179 return R;

4180

4181

4182

4183

4184

4185

4186

4187

4188

4189 return nullptr;

4190}
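visitExtractValueInst above walks the extract and insert index lists in parallel: identical lists mean the extract is simply the inserted value, and a divergence at some position means the extract is untouched by the insert and can look through to the original aggregate. A tiny struct analogy in plain C++ (illustrative only; the types and fields are made up):

#include <cassert>

struct Pair { int A; int B; };

int main() {
  Pair Agg{1, 2};
  Pair Ins = Agg;
  Ins.B = 42;             // "insertvalue %Agg, 42, 1"
  assert(Ins.B == 42);    // same index list: extract yields the inserted value
  assert(Ins.A == Agg.A); // indices diverge: extract looks through to %Agg
  return 0;
}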

4191

4192

4194 switch (Personality) {

4198

4199

4200 return false;

4202 return false;

4204

4205

4206 return false;

4218 }

4220}

4221

4223 return

4224 cast<ArrayType>(LHS->getType())->getNumElements()

4225 <

4226 cast<ArrayType>(RHS->getType())->getNumElements();

4227}

4228

4230

4231

4232

4235

4236

4237

4238 bool MakeNewInstruction = false;

4240 bool CleanupFlag = LI.isCleanup();

4241

4243 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {

4244 bool isLastClause = i + 1 == e;

4246

4249

4250

4251

4252 if (AlreadyCaught.insert(TypeInfo).second) {

4253

4254 NewClauses.push_back(CatchClause);

4255 } else {

4256

4257 MakeNewInstruction = true;

4258 }

4259

4260

4261

4262 if (isCatchAll(Personality, TypeInfo)) {

4263 if (!isLastClause)

4264 MakeNewInstruction = true;

4265 CleanupFlag = false;

4266 break;

4267 }

4268 } else {

4269

4270

4271

4272

4273

4274

4275

4276 assert(LI.isFilter(i) && "Unsupported landingpad clause!");

4278 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());

4279 unsigned NumTypeInfos = FilterType->getNumElements();

4280

4281

4282

4283

4284 if (!NumTypeInfos) {

4285 NewClauses.push_back(FilterClause);

4286 if (!isLastClause)

4287 MakeNewInstruction = true;

4288 CleanupFlag = false;

4289 break;

4290 }

4291

4292 bool MakeNewFilter = false;

4294 if (!isa<ConstantArray>(FilterClause)) {

4295

4296 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");

4299

4300 if (isCatchAll(Personality, TypeInfo)) {

4301

4302 MakeNewInstruction = true;

4303 continue;

4304 }

4305

4306

4307

4308 NewFilterElts.push_back(TypeInfo);

4309 if (NumTypeInfos > 1)

4310 MakeNewFilter = true;

4311 } else {

4314 NewFilterElts.reserve(NumTypeInfos);

4315

4316

4317

4318

4319 bool SawCatchAll = false;

4320 for (unsigned j = 0; j != NumTypeInfos; ++j) {

4323 if (isCatchAll(Personality, TypeInfo)) {

4324

4325 SawCatchAll = true;

4326 break;

4327 }

4328

4329

4330

4331

4332

4333

4334

4335

4336

4337

4338

4339

4340

4341

4342

4343

4344

4345

4346

4347

4348 if (SeenInFilter.insert(TypeInfo).second)

4349 NewFilterElts.push_back(cast<Constant>(Elt));

4350 }

4351

4352 if (SawCatchAll) {

4353

4354 MakeNewInstruction = true;

4355 continue;

4356 }

4357

4358

4359 if (NewFilterElts.size() < NumTypeInfos)

4360 MakeNewFilter = true;

4361 }

4362 if (MakeNewFilter) {

4364 NewFilterElts.size());

4366 MakeNewInstruction = true;

4367 }

4368

4369 NewClauses.push_back(FilterClause);

4370

4371

4372

4373

4374

4375 if (MakeNewFilter && !NewFilterElts.size()) {

4376 assert(MakeNewInstruction && "New filter but not a new instruction!");

4377 CleanupFlag = false;

4378 break;

4379 }

4380 }

4381 }

4382

4383

4384

4385

4386

4387

4388 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {

4389 unsigned j;

4390

4391 for (j = i; j != e; ++j)

4392 if (!isa<ArrayType>(NewClauses[j]->getType()))

4393 break;

4394

4395

4396

4397

4398 for (unsigned k = i; k + 1 < j; ++k)

4399 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {

4400

4401

4402 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,

4404 MakeNewInstruction = true;

4405 break;

4406 }

4407

4408

4409 i = j + 1;

4410 }

4411

4412

4413

4414

4415

4416

4417

4418

4419

4420

4421

4422

4423 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {

4424

4426 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());

4427 if (!FTy)

4428

4429 continue;

4431

4432

4433 for (unsigned j = NewClauses.size() - 1; j != i; --j) {

4434 Value *LFilter = NewClauses[j];

4436 if (!LTy)

4437

4438 continue;

4439

4440

4442

4443 if (!FElts) {

4444

4445 NewClauses.erase(J);

4446 MakeNewInstruction = true;

4447

4448 continue;

4449 }

4451

4452 if (FElts > LElts)

4453

4454 continue;

4455

4456 if (isa<ConstantAggregateZero>(LFilter)) {

4457

4458

4459 if (isa<ConstantAggregateZero>(Filter)) {

4460 assert(FElts <= LElts && "Should have handled this case earlier!");

4461

4462 NewClauses.erase(J);

4463 MakeNewInstruction = true;

4464 }

4465

4466 continue;

4467 }

4468 ConstantArray *LArray = cast<ConstantArray>(LFilter);

4469 if (isa<ConstantAggregateZero>(Filter)) {

4470

4471

4472 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");

4473 for (unsigned l = 0; l != LElts; ++l)

4474 if (LArray->getOperand(l)->isNullValue()) {

4475

4476 NewClauses.erase(J);

4477 MakeNewInstruction = true;

4478 break;

4479 }

4480

4481 continue;

4482 }

4483

4484

4485

4486

4488 bool AllFound = true;

4489 for (unsigned f = 0; f != FElts; ++f) {

4491 AllFound = false;

4492 for (unsigned l = 0; l != LElts; ++l) {

4494 if (LTypeInfo == FTypeInfo) {

4495 AllFound = true;

4496 break;

4497 }

4498 }

4499 if (!AllFound)

4500 break;

4501 }

4502 if (AllFound) {

4503

4504 NewClauses.erase(J);

4505 MakeNewInstruction = true;

4506 }

4507

4508 }

4509 }

4510

4511

4512

4513 if (MakeNewInstruction) {

4515 NewClauses.size());

4518

4519

4520

4521 if (NewClauses.empty())

4522 CleanupFlag = true;

4524 return NLI;

4525 }

4526

4527

4528

4529 if (LI.isCleanup() != CleanupFlag) {

4530 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");

4532 return &LI;

4533 }

4534

4535 return nullptr;

4536}
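Part of visitLandingPadInst above removes a later filter L when an earlier filter F is a subset of L: an exception whose type is listed in F (so F does not match) is also listed in L, so L does not match either, while an exception F does match never reaches L. A minimal subset check in plain C++ (illustrative only, not LLVM code; type infos are modelled as integers):

#include <algorithm>
#include <cassert>
#include <vector>

static bool isSubset(const std::vector<int> &F, const std::vector<int> &L) {
  return std::all_of(F.begin(), F.end(), [&](int TypeInfo) {
    return std::find(L.begin(), L.end(), TypeInfo) != L.end();
  });
}

int main() {
  std::vector<int> EarlierFilter{1, 2};
  std::vector<int> LaterFilter{1, 2, 3};
  assert(isSubset(EarlierFilter, LaterFilter));  // LaterFilter is removable
  assert(!isSubset(LaterFilter, EarlierFilter)); // the reverse is not
  return 0;
}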

4537

4540

4541

4542

4543

4544

4545

4546

4547

4548

4549

4550

4551

4552

4553

4554 auto *OrigOp = OrigFI.getOperand(0);

4555 auto *OrigOpInst = dyn_cast(OrigOp);

4556

4557

4558

4559

4560 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))

4561 return nullptr;

4562

4563

4564

4565

4566

4568 false))

4569 return nullptr;

4570

4571

4572

4573

4574 Use *MaybePoisonOperand = nullptr;

4575 for (Use &U : OrigOpInst->operands()) {

4576 if (isa<MetadataAsValue>(U.get()) ||

4578 continue;

4579 if (!MaybePoisonOperand)

4580 MaybePoisonOperand = &U;

4581 else

4582 return nullptr;

4583 }

4584

4585 OrigOpInst->dropPoisonGeneratingAnnotations();

4586

4587

4588 if (!MaybePoisonOperand)

4589 return OrigOp;

4590

4593 MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");

4594

4595 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);

4596 return OrigOp;

4597}
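pushFreezeToPreventPoisonFromPropagating above hoists the freeze over a single-use, non-PHI operand instruction, but only when at most one of that instruction's operands might itself be poison; freezing that one operand (and dropping poison-generating annotations) then makes the whole result well defined. A sketch of that precondition in plain C++ (illustrative only; operand poison-ness is reduced to a vector of flags):

#include <cassert>
#include <vector>

// The push is only performed when no more than one operand may be poison;
// otherwise freezing a single operand cannot make the result well defined.
static bool atMostOneMaybePoisonOperand(const std::vector<bool> &MaybePoison) {
  unsigned Count = 0;
  for (bool MP : MaybePoison)
    if (MP)
      ++Count;
  return Count <= 1;
}

int main() {
  assert(atMostOneMaybePoisonOperand({false, true}));
  assert(!atMostOneMaybePoisonOperand({true, true}));
  return 0;
}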

4598

4601

4602

4603

4604

4605

4606 Use *StartU = nullptr;

4610

4611 Worklist.push_back(U.get());

4612 continue;

4613 }

4614

4615

4616 if (StartU)

4617 return nullptr;

4618 StartU = &U;

4619 }

4620

4621 if (!StartU || Worklist.empty())

4622 return nullptr;

4623

4624 Value *StartV = StartU->get();

4627

4628

4629 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)

4630 return nullptr;

4631

4636 if (!Visited.insert(V).second)

4637 continue;

4638

4639 if (Visited.size() > 32)

4640 return nullptr;

4641

4642

4644 continue;

4645

4648 false))

4649 return nullptr;

4650

4653 }

4654

4656 I->dropPoisonGeneratingAnnotations();

4657

4658 if (StartNeedsFreeze) {

4661 StartV->getName() + ".fr");

4663 }

4665}

4666

4669

4670 if (isa<Constant>(Op) || Op->hasOneUse())

4671 return false;

4672

4673

4674

4675

4676

4677

4679 if (isa<Argument>(Op)) {

4680 MoveBefore =

4682 } else {

4683 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();

4684 if (!MoveBeforeOpt)

4685 return false;

4686 MoveBefore = *MoveBeforeOpt;

4687 }

4688

4689

4690 if (isa<DbgInfoIntrinsic>(MoveBefore))

4691 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();

4692

4693

4694 MoveBefore.setHeadBit(false);

4695

4696 bool Changed = false;

4697 if (&FI != &*MoveBefore) {

4698 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);

4699 Changed = true;

4700 }

4701

4702 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {

4704 Changed |= Dominates;

4705 return Dominates;

4706 });

4707

4708 return Changed;

4709}

4710

4711

4713 for (auto *U : V->users()) {

4714 if (isa<ShuffleVectorInst>(U))

4715 return true;

4717 return true;

4718 }

4719 return false;

4720}

4721

4723 Value *Op0 = I.getOperand(0);

4724

4727

4728

4729 if (auto *PN = dyn_cast<PHINode>(Op0)) {

4731 return NV;

4733 return NV;

4734 }

4735

4738

4739

4740

4741

4742

4743

4744

4745

4746

4747

4748

4749

4750

4751

4752 auto getUndefReplacement = [&I](Type *Ty) {

4753 Constant *BestValue = nullptr;

4755 for (const auto *U : I.users()) {

4761

4762 if (!BestValue)

4763 BestValue = C;

4764 else if (BestValue != C)

4765 BestValue = NullValue;

4766 }

4767 assert(BestValue && "Must have at least one use");

4768 return BestValue;

4769 };

4770

4772

4773

4774

4776 return nullptr;

4778 }

4779

4781 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {

4782 Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());

4784 }

4785

4786

4788 return &I;

4789

4790 return nullptr;

4791}
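visitFreeze above replaces freeze of undef/poison with a concrete constant: each interested user may suggest a preferred constant, and when the suggestions disagree the code falls back to the type's null value. A sketch of that selection rule in plain C++ (illustrative only; the suggestion list is a stand-in for the per-user inspection in getUndefReplacement):

#include <cassert>
#include <optional>
#include <vector>

static int chooseReplacement(const std::vector<int> &SuggestedByUsers,
                             int NullValue) {
  std::optional<int> Best;
  for (int C : SuggestedByUsers) {
    if (!Best)
      Best = C;         // first suggestion wins provisionally
    else if (*Best != C)
      Best = NullValue; // disagreement: fall back to the null value
  }
  return Best.value_or(NullValue);
}

int main() {
  assert(chooseReplacement({7, 7, 7}, 0) == 7);
  assert(chooseReplacement({7, 3}, 0) == 0);
  return 0;
}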

4792

4793

4794

4795

4797 auto *CB = dyn_cast<CallBase>(I);

4798 if (!CB)

4799

4800

4801

4802 return false;

4804 if (!Dest)

4805 return false;

4807 if (!AI)

4808

4809 return false;

4810

4811

4812

4815 auto pushUsers = [&](const Instruction &I) {

4816 for (const User *U : I.users()) {

4817 if (Visited.insert(U).second)

4819 }

4820 };

4821 pushUsers(*AI);

4822 while (!AllocaUsers.empty()) {

4823 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());

4824 if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI)) {

4825 pushUsers(*UserI);

4826 continue;

4827 }

4828 if (UserI == CB)

4829 continue;

4830

4831 return false;

4832 }

4833 return true;

4834}

4835

4836

4837

4838

4839

4843

4844

4845 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||

4846 I->isTerminator())

4847 return false;

4848

4849

4850

4851

4852

4853 if (isa<AllocaInst>(I))

4854 return false;

4855

4856

4857 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))

4858 return false;

4859

4860

4861 if (auto *CI = dyn_cast<CallBase>(I)) {

4862 if (CI->isConvergent())

4863 return false;

4864 }

4865

4866

4867

4868 if (I->mayWriteToMemory()) {

4870 return false;

4871 }

4872

4873

4874

4875 if (I->mayReadFromMemory() &&

4876 I->hasMetadata(LLVMContext::MD_invariant_load)) {

4877

4878

4879

4881 return false;

4883 E = I->getParent()->end();

4884 Scan != E; ++Scan)

4885 if (Scan->mayWriteToMemory())

4886 return false;

4887 }

4888

4889 I->dropDroppableUses([&](const Use *U) {

4890 auto *I = dyn_cast<Instruction>(U->getUser());

4891 if (I && I->getParent() != DestBlock) {

4893 return true;

4894 }

4895 return false;

4896 });

4897

4898

4899

4901 I->moveBefore(*DestBlock, InsertPos);

4902 ++NumSunkInst;

4903

4904

4905

4906

4907

4908

4912 if (!DbgUsers.empty())

4914 if (!DbgVariableRecords.empty())

4916 DbgVariableRecords);

4917

4918

4919

4920

4921

4922

4923

4924

4925

4926

4927 return true;

4928}

4929

4933

4934

4936 for (auto &DbgUser : DbgUsers)

4937 if (DbgUser->getParent() != DestBlock)

4938 DbgUsersToSalvage.push_back(DbgUser);

4939

4940

4941

4944 if (DVI->getParent() == SrcBlock)

4947 [](auto *A, auto *B) { return B->comesBefore(A); });

4948

4951 for (auto *User : DbgUsersToSink) {

4952

4953

4954

4955

4956 if (isa<DbgDeclareInst>(User))

4957 continue;

4958

4961 User->getDebugLoc()->getInlinedAt());

4962

4963 if (!SunkVariables.insert(DbgUserVariable).second)

4964 continue;

4965

4966

4967

4968 if (isa<DbgAssignIntrinsic>(User))

4969 continue;

4970

4971 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));

4972 if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))

4973 DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));

4974 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');

4975 }

4976

4977

4978 if (!DIIClones.empty()) {

4980

4981

4982 for (auto &DIIClone : llvm::reverse(DIIClones)) {

4983 DIIClone->insertBefore(&*InsertPos);

4984 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');

4985 }

4986 }

4987}
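tryToSinkInstructionDbgValues above clones at most one debug user per variable into the destination block: the users are visited newest-first and a set keyed by DebugVariable keeps only the first (most recent) hit. A sketch of that de-duplication in plain C++ (illustrative only; variables are plain strings here):

#include <cassert>
#include <set>
#include <string>
#include <vector>

static std::vector<std::string>
keepNewestPerVariable(const std::vector<std::string> &NewestFirst) {
  std::set<std::string> Seen;
  std::vector<std::string> Kept;
  for (const std::string &Var : NewestFirst)
    if (Seen.insert(Var).second) // keep only the newest record per variable
      Kept.push_back(Var);
  return Kept;
}

int main() {
  auto Kept = keepNewestPerVariable({"x", "y", "x"});
  assert(Kept.size() == 2 && Kept[0] == "x" && Kept[1] == "y");
  return 0;
}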

4988

4993

4994

4995

4996

4998 for (auto &DVR : DbgVariableRecords)

4999 if (DVR->getParent() != DestBlock)

5000 DbgVariableRecordsToSalvage.push_back(DVR);

5001

5002

5003

5006 if (DVR->getParent() == SrcBlock)

5007 DbgVariableRecordsToSink.push_back(DVR);

5008

5009

5010

5011

5012

5014 return B->getInstruction()->comesBefore(A->getInstruction());

5015 };

5017

5018

5019

5020

5021 using InstVarPair = std::pair<const Instruction *, DebugVariable>;

5023 if (DbgVariableRecordsToSink.size() > 1) {

5025

5028 DebugVariable(DVR->getVariable(), DVR->getExpression(),

5029 DVR->getDebugLoc()->getInlinedAt());

5030 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;

5031 }

5032

5033

5034

5036 for (auto It : CountMap) {

5037 if (It.second > 1) {

5038 FilterOutMap[It.first] = nullptr;

5039 DupSet.insert(It.first.first);

5040 }

5041 }

5042

5043

5044

5045 for (const Instruction *Inst : DupSet) {

5049 DebugVariable(DVR.getVariable(), DVR.getExpression(),

5050 DVR.getDebugLoc()->getInlinedAt());

5051 auto FilterIt =

5052 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));

5053 if (FilterIt == FilterOutMap.end())

5054 continue;

5055 if (FilterIt->second != nullptr)

5056 continue;

5057 FilterIt->second = &DVR;

5058 }

5059 }

5060 }

5061

5062

5063

5068 continue;

5069

5071 DebugVariable(DVR->getVariable(), DVR->getExpression(),

5072 DVR->getDebugLoc()->getInlinedAt());

5073

5074

5075

5076 if (!FilterOutMap.empty()) {

5077 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);

5078 auto It = FilterOutMap.find(IVP);

5079

5080

5081 if (It != FilterOutMap.end() && It->second != DVR)

5082 continue;

5083 }

5084

5085 if (!SunkVariables.insert(DbgUserVariable).second)

5086 continue;

5087

5088 if (DVR->isDbgAssign())

5089 continue;

5090

5093 }

5094

5095

5096 if (DVRClones.empty())

5097 return;

5098

5100

5101

5102

5103

5104

5105

5106

5107

5108

5109

5110 assert(InsertPos.getHeadBit());

5112 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);

5113 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');

5114 }

5115}

5116

5119

5120

5122

5123

5124

5125

5128 ++NumDeadInst;

5129 continue;

5130 }

5131

5133 }

5134

5136 if (I == nullptr) continue;

5137

5138

5141 ++NumDeadInst;

5142 continue;

5143 }

5144

5146 continue;

5147

5148

5149

5150

5151 auto getOptionalSinkBlockForInst =

5152 [this](Instruction *I) -> std::optional<BasicBlock *> {

5154 return std::nullopt;

5155

5158 unsigned NumUsers = 0;

5159

5160 for (Use &U : I->uses()) {

5163 continue;

5165 return std::nullopt;

5166

5168

5170 if (PHINode *PN = dyn_cast<PHINode>(UserInst))

5171 UserBB = PN->getIncomingBlock(U);

5172

5173

5174

5175 if (UserParent && UserParent != UserBB)

5176 return std::nullopt;

5177 UserParent = UserBB;

5178

5179

5180

5181 if (NumUsers == 0) {

5182

5183

5185 return std::nullopt;

5186

5188

5189

5190

5191

5192

5193

5194

5195

5197 return std::nullopt;

5198

5199 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");

5200 }

5201

5202 NumUsers++;

5203 }

5204

5205

5206 if (!UserParent)

5207 return std::nullopt;

5208

5209 return UserParent;

5210 };

5211

5212 auto OptBB = getOptionalSinkBlockForInst(I);

5213 if (OptBB) {

5214 auto *UserParent = *OptBB;

5215

5219

5220

5221

5222 for (Use &U : I->operands())

5223 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))

5225 }

5226 }

5227

5228

5231 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});

5232

5233#ifndef NDEBUG

5234 std::string OrigI;

5235#endif

5237 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');

5238

5240 ++NumCombined;

5241

5242 if (Result != I) {

5244 << " New = " << *Result << '\n');

5245

5246

5247

5248

5249 if (!Result->getDebugLoc())

5250 Result->setDebugLoc(I->getDebugLoc());

5251

5252 Result->copyMetadata(*I, LLVMContext::MD_annotation);

5253

5254 I->replaceAllUsesWith(Result);

5255

5256

5257 Result->takeName(I);

5258

5259

5260 BasicBlock *InstParent = I->getParent();

5262

5263

5264 if (isa<PHINode>(Result) != isa<PHINode>(I)) {

5265

5266 if (isa<PHINode>(I))

5268 else

5270 }

5271

5272 Result->insertInto(InstParent, InsertPos);

5273

5274

5277

5279 } else {

5281 << " New = " << *I << '\n');

5282

5283

5284

5287 } else {

5290 }

5291 }

5293 }

5294 }

5295

5298}
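The sinking logic in run() above only picks a destination when every user of the instruction lives in a single block (a PHI use counts as living in its incoming block) and only for a bounded number of undroppable users (the instcombine-max-sink-users option). A sketch of the single-block test in plain C++ (illustrative only; blocks are identified by integers):

#include <cassert>
#include <optional>
#include <vector>

static std::optional<int> uniqueUserBlock(const std::vector<int> &UserBlocks) {
  std::optional<int> Block;
  for (int B : UserBlocks) {
    if (Block && *Block != B)
      return std::nullopt; // users span two blocks: do not sink
    Block = B;
  }
  return Block;            // empty input also yields "no block"
}

int main() {
  assert(uniqueUserBlock({3, 3, 3}) == 3);
  assert(!uniqueUserBlock({3, 4}));
  return 0;
}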

5299

5300

5301

5302

5303

5304

5305

5309

5310public:

5312

5313 if (I->hasMetadataOtherThanDebugLoc())

5314 return;

5315

5316 auto Track = [](Metadata *ScopeList, auto &Container) {

5317 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);

5318 if (!MDScopeList || !Container.insert(MDScopeList).second)

5319 return;

5320 for (const auto &MDOperand : MDScopeList->operands())

5321 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))

5322 Container.insert(MDScope);

5323 };

5324

5325 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);

5326 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);

5327 }

5328

5331 if (!Decl)

5332 return false;

5333

5335 "llvm.experimental.noalias.scope.decl in use ?");

5338 "llvm.experimental.noalias.scope should refer to a single scope");

5340 if (auto *MD = dyn_cast<MDNode>(MDOperand))

5341 return !UsedAliasScopesAndLists.contains(MD) ||

5342 !UsedNoAliasScopesAndLists.contains(MD);

5343

5344

5345 return true;

5346 }

5347};

5348

5349

5350

5351

5352

5353

5354

5355

5356

5363

5366 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)

5367 for (PHINode &PN : Succ->phis())

5368 for (Use &U : PN.incoming_values())

5369 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {

5372 }

5373 };

5374

5378 })) {

5379 HandleOnlyLiveSuccessor(BB, nullptr);

5380 continue;

5381 }

5382 LiveBlocks.insert(BB);

5383

5385

5386 if (!Inst.use_empty() &&

5387 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))

5389 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst

5390 << '\n');

5391 Inst.replaceAllUsesWith(C);

5392 ++NumConstProp;

5394 Inst.eraseFromParent();

5396 continue;

5397 }

5398

5399

5400 for (Use &U : Inst.operands()) {

5401 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))

5402 continue;

5403

5404 auto *C = cast<Constant>(U);

5405 Constant *&FoldRes = FoldedConstants[C];

5406 if (!FoldRes)

5408

5409 if (FoldRes != C) {

5410 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst

5411 << "\n Old = " << *C

5412 << "\n New = " << *FoldRes << '\n');

5413 U = FoldRes;

5415 }

5416 }

5417

5418

5419

5420

5421 if (!Inst.isDebugOrPseudoInst()) {

5422 InstrsForInstructionWorklist.push_back(&Inst);

5423 SeenAliasScopes.analyse(&Inst);

5424 }

5425 }

5426

5427

5428

5431 if (isa<UndefValue>(BI->getCondition())) {

5432

5433 HandleOnlyLiveSuccessor(BB, nullptr);

5434 continue;

5435 }

5436 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {

5437 bool CondVal = Cond->getZExtValue();

5438 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));

5439 continue;

5440 }

5441 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {

5442 if (isa<UndefValue>(SI->getCondition())) {

5443

5444 HandleOnlyLiveSuccessor(BB, nullptr);

5445 continue;

5446 }

5447 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {

5448 HandleOnlyLiveSuccessor(BB,

5449 SI->findCaseValue(Cond)->getCaseSuccessor());

5450 continue;

5451 }

5452 }

5453 }

5454

5455

5456

5457

5459 if (LiveBlocks.count(&BB))

5460 continue;

5461

5462 unsigned NumDeadInstInBB;

5463 unsigned NumDeadDbgInstInBB;

5464 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =

5466

5467 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;

5468 NumDeadInst += NumDeadInstInBB;

5469 }

5470

5471

5472

5473

5474

5475

5478

5479

5482 ++NumDeadInst;

5485 Inst->eraseFromParent();

5487 continue;

5488 }

5489

5491 }

5492

5494}
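prepareWorklist above only enqueues reachable code: when a terminator branches or switches on a constant, a single successor edge stays live and the others are recorded as dead edges. A sketch of that successor choice in plain C++ (illustrative only; -1 stands for "all successors remain live"):

#include <cassert>

static int liveSuccessor(bool CondIsConstant, bool CondValue, int TrueSucc,
                         int FalseSucc) {
  if (!CondIsConstant)
    return -1; // nothing is known: keep both edges live
  return CondValue ? TrueSucc : FalseSucc;
}

int main() {
  assert(liveSuccessor(true, false, 1, 2) == 2);
  assert(liveSuccessor(true, true, 1, 2) == 1);
  assert(liveSuccessor(false, false, 1, 2) == -1);
  return 0;
}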

5495

5497

5504 }

5506}

5507

5514 auto &DL = F.getDataLayout();

5516 F.hasFnAttribute("instcombine-no-verify-fixpoint");

5517

5518

5519

5523 Worklist.add(I);

5524 if (auto *Assume = dyn_cast<AssumeInst>(I))

5526 }));

5527

5529

5530

5531

5532 bool MadeIRChange = false;

5535

5536

5537 unsigned Iteration = 0;

5538 while (true) {

5539 ++Iteration;

5540

5541 if (Iteration > Opts.MaxIterations && !VerifyFixpoint) {

5543 << " on " << F.getName()

5544 << " reached; stopping without verifying fixpoint\n");

5545 break;

5546 }

5547

5548 ++NumWorklistIterations;

5549 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "

5550 << F.getName() << "\n");

5551

5552 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,

5553 ORE, BFI, BPI, PSI, DL, RPOT);

5556 MadeChangeInThisIteration |= IC.run();

5557 if (!MadeChangeInThisIteration)

5558 break;

5559

5560 MadeIRChange = true;

5563 "Instruction Combining on " + Twine(F.getName()) +

5565 " iterations. " +

5566 "Use 'instcombine' or function attribute "

5567 "'instcombine-no-verify-fixpoint' to suppress this error.",

5568 false);

5569 }

5570 }

5571

5572 if (Iteration == 1)

5573 ++NumOneIteration;

5574 else if (Iteration == 2)

5575 ++NumTwoIterations;

5576 else if (Iteration == 3)

5577 ++NumThreeIterations;

5578 else

5579 ++NumFourOrMoreIterations;

5580

5581 return MadeIRChange;

5582}
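combineInstructionsOverFunction above re-runs the combiner until an iteration makes no change (a fixpoint) or, unless fixpoint verification is requested, the MaxIterations budget is exhausted. A toy version of that driver loop in plain C++ (illustrative only; the "combine" step simply halves an integer):

#include <cassert>

template <typename StepFn>
static unsigned runToFixpoint(StepFn Step, unsigned MaxIterations) {
  unsigned Iteration = 0;
  while (Iteration < MaxIterations && Step())
    ++Iteration;
  return Iteration;
}

int main() {
  int Value = 5;
  unsigned Iters = runToFixpoint(
      [&] {
        if (Value == 0)
          return false; // nothing changed: fixpoint reached
        Value /= 2;
        return true;    // made a change: run another iteration
      },
      100);
  assert(Value == 0 && Iters == 3);
  return 0;
}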

5583

5585

5589 OS, MapClassName2PassName);

5590 OS << '<';

5591 OS << "max-iterations=" << Options.MaxIterations << ";";

5592 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";

5593 OS << '>';

5594}

5595

5596char InstCombinePass::ID = 0;

5597

5601

5602 if (LRT.shouldSkip(&ID))

5604

5610

5615 auto *BFI = (PSI && PSI->hasProfileSummary()) ?

5618

5620 BFI, BPI, PSI, Options)) {

5621

5622 LRT.update(&ID, false);

5624 }

5625

5626

5628 LRT.update(&ID, true);

5631 return PA;

5632}

5633

5648}

5649

5652 return false;

5653

5654

5655 auto AA = &getAnalysis().getAAResults();

5656 auto &AC = getAnalysis().getAssumptionCache(F);

5657 auto &TLI = getAnalysis().getTLI(F);

5658 auto &TTI = getAnalysis().getTTI(F);

5659 auto &DT = getAnalysis().getDomTree();

5660 auto &ORE = getAnalysis().getORE();

5661

5662

5664 &getAnalysis().getPSI();

5667 &getAnalysis().getBFI() :

5668 nullptr;

5670 if (auto *WrapperPass =

5671 getAnalysisIfAvailable())

5672 BPI = &WrapperPass->getBPI();

5673

5676}

5677

5679

5682}

5683

5685 "Combine redundant instructions", false, false)

5697

5698

5701}

5702

5705}


This file implements a class to represent arbitrary precision integral constant values and operations...


static const Function * getParent(const Value *V)

This is the interface for LLVM's primary stateless and local alias analysis.


This file contains the declarations for the subclasses of Constant, which represent the different fla...

Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.

This file provides an implementation of debug counters.

#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)

This file defines the DenseMap class.


static bool isSigned(unsigned int Opcode)

This is the interface for a simple mod/ref and alias analysis over globals.

This file provides various utilities for inspecting and working with the control flow graph in LLVM I...

This header defines various interfaces for pass management in LLVM.

This defines the Use class.


static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)

Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".

This file provides internal interfaces used to implement the InstCombine.

This file provides the primary interface to the instcombine pass.

static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)

static bool isUsedWithinShuffleVector(Value *V)

static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)

static bool shorter_filter(const Value *LHS, const Value *RHS)

static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)

Thread a GEP operation with constant indices through the constant true/false arms of a select.

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)

static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))

static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))

static bool hasNoSignedWrap(BinaryOperator &I)

static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)

Combine constant operands of associative operations either before or after a cast to eliminate one of...

static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)

static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)

static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)

Return true if we should canonicalize the gep to an i8 ptradd.

static void ClearSubclassDataAfterReassociation(BinaryOperator &I)

Conservatively clears subclassOptionalData after a reassociation or commutation.

static bool isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI)

static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)

This function returns identity value for given opcode, which can be used to factor patterns like (X *...

static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)

static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)

static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)

static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)

Move the call to free before a NULL test.

static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)

static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)

Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".

static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)

This tries to simplify binary operations by factorizing out common terms (e.

static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)

Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...

static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)

This function predicates factorization using distributive laws.

static bool hasNoUnsignedWrap(BinaryOperator &I)

static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)

Check for case where the call writes to an otherwise dead alloca.

static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))

static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)

static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)

Return 'true' if the given typeinfo will match anything.

static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))

static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)

static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)

Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.

uint64_t IntrinsicInst * II


#define INITIALIZE_PASS_DEPENDENCY(depName)

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)


This file defines the SmallPtrSet class.

This file defines the SmallVector class.

This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...

#define STATISTIC(VARNAME, DESC)

static unsigned getScalarSizeInBits(Type *Ty)

static SymbolRef::Type getType(const Symbol *Sym)

This pass exposes codegen information to IR-level passes.

static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)

Returns the opcode of Values or ~0 if they do not all agree.


bool isNoAliasScopeDeclDead(Instruction *Inst)

void analyse(Instruction *I)

A manager for alias analyses.

A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.

Class for arbitrary precision integers.

static APInt getAllOnes(unsigned numBits)

Return an APInt of a specified width with all bits set.

static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)

Dual division/remainder interface.

bool isMinSignedValue() const

Determine if this is the smallest signed value.

static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)

APInt trunc(unsigned width) const

Truncate to new width.

bool isAllOnes() const

Determine if all bits are set. This is true for zero-width values.

bool isZero() const

Determine if this value is zero, i.e. all bits are clear.

unsigned getBitWidth() const

Return the number of bits in the APInt.

APInt sadd_ov(const APInt &RHS, bool &Overflow) const

APInt ashr(unsigned ShiftAmt) const

Arithmetic right-shift function.

APInt smul_ov(const APInt &RHS, bool &Overflow) const

bool isNonNegative() const

Determine if this APInt Value is non-negative (>= 0)

bool ule(const APInt &RHS) const

Unsigned less or equal comparison.

bool isPowerOf2() const

Check if this APInt's value is a power of two greater than zero.

static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)

Constructs an APInt value that has the bottom loBitsSet bits set.

APInt ssub_ov(const APInt &RHS, bool &Overflow) const

APInt lshr(unsigned shiftAmt) const

Logical right-shift function.

A container for analyses that lazily runs them and caches their results.

PassT::Result * getCachedResult(IRUnitT &IR) const

Get the cached result of an analysis pass for a given IR unit.

PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)

Get the result of an analysis pass for a given IR unit.

Represent the analysis usage information of a pass.

AnalysisUsage & addRequired()

AnalysisUsage & addPreserved()

Add the specified Pass class to the set of analyses preserved by this pass.

void setPreservesCFG()

This function should be called by the pass, iff they do not:

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

Class to represent array types.

uint64_t getNumElements() const

static ArrayType * get(Type *ElementType, uint64_t NumElements)

This static method is the primary way to construct an ArrayType.

Type * getElementType() const

A function analysis which provides an AssumptionCache.

An immutable pass that tracks lazily created AssumptionCache objects.

A cache of @llvm.assume calls within a function.

void registerAssumption(AssumeInst *CI)

Add an @llvm.assume intrinsic to this function's cache.

uint64_t getDereferenceableBytes() const

Returns the number of dereferenceable bytes from the dereferenceable attribute.

bool isValid() const

Return true if the attribute is any kind of attribute.

Legacy wrapper pass to provide the BasicAAResult object.

LLVM Basic Block Representation.

iterator_range< const_phi_iterator > phis() const

Returns a range that iterates over the phis in the basic block.

const_iterator getFirstInsertionPt() const

Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...

iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const

Return a const iterator range over the instructions in the block, skipping any debug instructions.

InstListType::const_iterator getFirstNonPHIIt() const

Iterator returning form of getFirstNonPHI.

const Instruction & front() const

bool isEntryBlock() const

Return true if this is the entry block of the containing function.

const BasicBlock * getSinglePredecessor() const

Return the predecessor of this block if it has a single predecessor block.

const BasicBlock * getUniquePredecessor() const

Return the predecessor of this block if it has a unique predecessor block.

InstListType::iterator iterator

Instruction iterators...

const_iterator getFirstNonPHIOrDbgOrAlloca() const

Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...

const Instruction * getTerminator() const LLVM_READONLY

Returns the terminator instruction if the block is well formed or null if the block is not well forme...

static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...

BinaryOps getOpcode() const

static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)

Construct a binary instruction, given the opcode and the two operands.

static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")

Analysis pass which computes BlockFrequencyInfo.

BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...

Conditional or Unconditional Branch instruction.

void swapSuccessors()

Swap the successors of this branch instruction.

bool isConditional() const

BasicBlock * getSuccessor(unsigned i) const

bool isUnconditional() const

Value * getCondition() const

Analysis pass which computes BranchProbabilityInfo.

Analysis providing branch probability information.

void swapSuccEdgesProbabilities(const BasicBlock *Src)

Swap outgoing edges probabilities for Src with branch terminator.

Represents analyses that only rely on functions' control flow.

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

void setAttributes(AttributeList A)

Set the attributes for this call.

bool doesNotThrow() const

Determine if the call cannot unwind.

Value * getArgOperand(unsigned i) const

AttributeList getAttributes() const

Return the attributes for this call.

This class represents a function call, abstracting a target machine's calling convention.

static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

@ ICMP_UGT

unsigned greater than

@ ICMP_ULT

unsigned less than

Predicate getSwappedPredicate() const

For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.

Predicate getInversePredicate() const

For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...

An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...

ConstantArray - Constant Array Declarations.

static Constant * get(ArrayType *T, ArrayRef< Constant * > V)

A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...

static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)

static Constant * getNot(Constant *C)

static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)

static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)

Return the identity constant for a binary opcode.

static Constant * getNeg(Constant *C, bool HasNSW=false)

This is the shared class of boolean and integer constants.

static ConstantInt * getTrue(LLVMContext &Context)

static ConstantInt * getFalse(LLVMContext &Context)

static ConstantInt * getBool(LLVMContext &Context, bool V)

This class represents a range of values.

bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const

Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.

static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)

Produce the exact range such that all values in the returned range satisfy the given predicate with a...

bool contains(const APInt &Val) const

Return true if the specified value is in the set.

static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)

Produce the range that contains X if and only if "X BinOp Other" does not wrap.

Constant Vector Declarations.

static Constant * get(ArrayRef< Constant * > V)

This is an important base class in LLVM.

static Constant * getIntegerValue(Type *Ty, const APInt &V)

Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...

static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)

Try to replace undefined constant C or undefined elements in C with Replacement.

static Constant * getAllOnesValue(Type *Ty)

const Constant * stripPointerCasts() const

static Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

Constant * getAggregateElement(unsigned Elt) const

For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...

bool isNullValue() const

Return true if this is the value that would be returned by getNullValue.

This class represents an Operation in the Expression.

A parsed version of the target data layout string in and methods for querying it.

SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const

Get GEP indices to access Offset inside ElemTy.

bool isLegalInteger(uint64_t Width) const

Returns true if the specified type is known to be a native integer type supported by the CPU.

unsigned getIndexTypeSizeInBits(Type *Ty) const

Layout size of the index used in GEP calculation.

IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const

Returns the type of a GEP index in AddressSpace.

TypeSize getTypeAllocSize(Type *Ty) const

Returns the offset in bytes between successive objects of the specified type, including alignment pad...

unsigned getIndexSizeInBits(unsigned AS) const

Size in bits of index used for address calculation in getelementptr.

TypeSize getTypeSizeInBits(Type *Ty) const

Size examples:

int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const

Returns the offset from the beginning of the type for the specified indices.

This is the common base class for debug info intrinsics for variables.

Record of a variable value-assignment, aka a non instruction representation of the dbg....

static bool shouldExecute(unsigned CounterName)

Identifies a unique instance of a variable.

ValueT lookup(const_arg_type_t< KeyT > Val) const

lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...

iterator find(const_arg_type_t< KeyT > Val)

std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)

void registerBranch(BranchInst *BI)

Add a branch condition to the cache.

Analysis pass which computes a DominatorTree.

Legacy analysis pass which computes a DominatorTree.

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

bool isReachableFromEntry(const Use &U) const

Provide an overload for a Use.

bool dominates(const BasicBlock *BB, const Use &U) const

Return true if the (end of the) basic block BB dominates the use U.

Utility class for floating point operations which can have information about relaxed accuracy require...

Convenience struct for specifying and reasoning about fast-math flags.

This class represents a freeze function that returns random concrete value if an operand is either a ...

FunctionPass class - This class is used to implement most global optimizations.

bool skipFunction(const Function &F) const

Optional passes call this function to check whether the pass should be skipped.

const BasicBlock & getEntryBlock() const

Represents flags for the getelementptr instruction/expression.

GEPNoWrapFlags withoutNoUnsignedSignedWrap() const

static GEPNoWrapFlags noUnsignedWrap()

GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const

Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).

GEPNoWrapFlags withoutNoUnsignedWrap() const

GEPNoWrapFlags getNoWrapFlags() const

an instruction for type-safe pointer arithmetic to access elements of arrays and structs

static Type * getTypeAtIndex(Type *Ty, Value *Idx)

Return the type of the element at the given index of an indexable type.

static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Create an "inbounds" getelementptr.

Legacy wrapper pass to provide the GlobalsAAResult object.

This instruction compares its operands according to the predicate given to the constructor.

CmpPredicate getCmpPredicate() const

static bool isEquality(Predicate P)

Return true if this predicate is either EQ or NE.

Common base class shared among various IRBuilders.

Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")

Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")

Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)

Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")

Value * CreateFreeze(Value *V, const Twine &Name="")

Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())

void setFastMathFlags(FastMathFlags NewFMF)

Set the fast-math flags to be used with generated fp-math operators.

Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")

Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())

void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)

Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.

Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")

Create a call to intrinsic ID with 2 operands which is mangled on the first type.

CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")

Create a call to intrinsic ID with Args, mangled using Types.

ConstantInt * getInt32(uint32_t C)

Get a constant 32-bit value.

Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)

PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")

Value * CreateNot(Value *V, const Twine &Name="")

Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)

LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)

Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...

Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")

Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")

Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)

Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)

Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)

Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")

void SetInsertPoint(BasicBlock *TheBB)

This specifies that created instructions should be appended to the end of the specified block.

Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)

Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")

Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")

Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")

IntegerType * getInt8Ty()

Fetch the type representing an 8-bit integer.

ConstantInt * getInt(const APInt &AI)

Get a constant integer value.

Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...

This instruction inserts a struct field of array element value into an aggregate value.

static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

InstCombinePass(InstCombineOptions Opts={})

void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)

PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)

Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)

Given an instruction with a select as one operand and a constant as the other operand,...

Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)

Tries to simplify binops of select and cast of the select condition.

Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)

This is a convenience wrapper function for the above two functions.

bool SimplifyAssociativeOrCommutative(BinaryOperator &I)

Performs a few simplifications for operators which are associative or commutative.

Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)

Value * foldUsingDistributiveLaws(BinaryOperator &I)

Tries to simplify binary operations which some other binary operation distributes over.

Instruction * foldBinOpShiftWithShift(BinaryOperator &I)

Instruction * visitUnreachableInst(UnreachableInst &I)

Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)

Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...

void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)

Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override

The specified value produces a vector with any number of elements.

Instruction * visitFreeze(FreezeInst &I)

void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)

bool prepareWorklist(Function &F)

Perform early cleanup and prepare the InstCombine worklist.

Instruction * visitFree(CallInst &FI, Value *FreedOp)

Instruction * visitExtractValueInst(ExtractValueInst &EV)

void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)

Instruction * visitUnconditionalBranchInst(BranchInst &BI)

Instruction * eraseInstFromFunction(Instruction &I) override

Combiner aware instruction erasure.

Instruction * visitLandingPadInst(LandingPadInst &LI)

Instruction * visitReturnInst(ReturnInst &RI)

Instruction * visitSwitchInst(SwitchInst &SI)

Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)

For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.

Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)

Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth, Instruction *CxtI)

Attempts to replace V with a simpler value based on the demanded floating-point classes.

bool mergeStoreIntoSuccessor(StoreInst &SI)

Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...

Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)

void tryToSinkInstructionDbgValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableIntrinsic * > &DbgUsers)

void CreateNonTerminatorUnreachable(Instruction *InsertAt)

Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...

Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)

bool run()

Run the combiner over the entire worklist until it is empty.

Instruction * foldVectorBinop(BinaryOperator &Inst)

Canonicalize the position of binops relative to shufflevector.

bool removeInstructionsBeforeUnreachable(Instruction &I)

Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)

void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)

void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)

Instruction * visitAllocSite(Instruction &FI)

Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)

Instruction * visitBranchInst(BranchInst &BI)

Value * tryFactorizationFolds(BinaryOperator &I)

This tries to simplify binary operations by factorizing out common terms (e.

Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)

bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)

Try to move the specified instruction from its current block into the beginning of DestBlock,...

bool freezeOtherUses(FreezeInst &FI)

void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)

Freely adapt every user of V as-if V was changed to !V.

The core instruction combiner logic.

const DataLayout & getDataLayout() const

bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)

Return true if the specified value is free to invert (apply ~ to).

static unsigned getComplexity(Value *V)

Assign a complexity or rank value to LLVM Values.

Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)

Inserts an instruction New before instruction Old.

Instruction * replaceInstUsesWith(Instruction &I, Value *V)

A combiner-aware RAUW-like routine.

uint64_t MaxArraySizeForCombine

Maximum size of array considered when transforming.

static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)

void replaceUse(Use &U, Value *NewValue)

Replace use and add the previously used value to the worklist.

static bool isCanonicalPredicate(CmpPredicate Pred)

Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...

InstructionWorklist & Worklist

A worklist of the instructions that need to be simplified.

Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)

Same as InsertNewInstBefore, but also sets the debug loc.

BranchProbabilityInfo * BPI

ReversePostOrderTraversal< BasicBlock * > & RPOT

unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const

std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)

void addToWorklist(Instruction *I)

Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)

Return nonnull value if V is free to invert under the condition of WillInvertAllUses.

SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges

Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...

std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)

Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)

Replace operand of instruction and add old operand to the worklist.

static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)

Some binary operators require special handling to avoid poison and undefined behavior.

SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges

Edges that are known to never be taken.

std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)

void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const

bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const

Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)

bool isBackEdge(const BasicBlock *From, const BasicBlock *To)

void visit(Iterator Start, Iterator End)

The legacy pass manager's instcombine pass.

InstructionCombiningPass()

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...

bool runOnFunction(Function &F) override

runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.

InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...

Instruction * removeOne()

void pushUsersToWorkList(Instruction &I)

When an instruction is simplified, add all users of the instruction to the work lists because they mi...

void add(Instruction *I)

Add instruction to the worklist.

void push(Instruction *I)

Push the instruction onto the worklist stack.

Instruction * popDeferred()

void zap()

Check that the worklist is empty and nuke the backing store for the map.

void reserve(size_t Size)
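
Taken together, these members support a driver loop along the following lines. This is a much-simplified sketch, not the actual InstCombine loop; an isEmpty() query on the worklist is assumed to be available.

while (!Worklist.isEmpty()) {
  // Re-queue deferred instructions so they are visited in order.
  while (Instruction *I = Worklist.popDeferred())
    Worklist.push(I);
  Instruction *I = Worklist.removeOne();
  if (!I)
    continue;                       // removed entries can leave null slots
  // ... simplify I here; queue I's users so they get revisited as well.
  Worklist.pushUsersToWorkList(*I);
}
Worklist.zap();                     // assert emptiness and free the map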

static bool isBitwiseLogicOp(unsigned Opcode)

Determine if the Opcode is and/or/xor.

void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)

Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

const Module * getModule() const

Return the module owning the function this instruction belongs to, or nullptr if the function does not...

void setAAMetadata(const AAMDNodes &N)

Sets the AA metadata on this instruction from the AAMDNodes structure.

bool isAssociative() const LLVM_READONLY

Return true if the instruction is associative:

bool isCommutative() const LLVM_READONLY

Return true if the instruction is commutative:

void setFastMathFlags(FastMathFlags FMF)

Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...

const Function * getFunction() const

Return the function this instruction belongs to.

bool isTerminator() const

void dropUBImplyingAttrsAndMetadata()

Drop any attributes or metadata that can cause immediate undefined behavior.

FastMathFlags getFastMathFlags() const LLVM_READONLY

Convenience function for getting all the fast-math flags, which must be an operator which supports th...

bool willReturn() const LLVM_READONLY

Return true if the instruction will return (unwinding is considered as a form of returning control fl...

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

bool isBitwiseLogicOp() const

Return true if this is and/or/xor.

void dropPoisonGeneratingFlags()

Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.

void setDebugLoc(DebugLoc Loc)

Set the debug location information for this instruction.

void moveBefore(Instruction *MovePos)

Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...

Class to represent integer types.

static IntegerType * get(LLVMContext &C, unsigned NumBits)

This static method is the primary way of constructing an IntegerType.

A wrapper class for inspecting calls to intrinsic functions.

static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)

The landingpad instruction holds all of the information necessary to generate correct exception handl...

bool isCleanup() const

Return 'true' if this landingpad instruction is a cleanup.

unsigned getNumClauses() const

Get the number of clauses for this landing pad.

static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...

void addClause(Constant *ClauseVal)

Add a catch or filter clause to the landing pad.

bool isCatch(unsigned Idx) const

Return 'true' if the clause at index Idx is a catch clause.

bool isFilter(unsigned Idx) const

Return 'true' if the clause at index Idx is a filter clause.

Constant * getClause(unsigned Idx) const

Get the value of the clause at index Idx.

void setCleanup(bool V)

Indicate that this landingpad instruction is a cleanup.
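
A small sketch of how these members fit together when building exception-handling code. Builder, InsertPt (an insertion point), and TypeInfo (a typeinfo global, a Constant*) are assumed to exist, and the enclosing function is assumed to already have a personality function set.

// The usual { ptr, i32 } landingpad result type.
Type *LPadTy = StructType::get(Builder.getPtrTy(), Builder.getInt32Ty());
LandingPadInst *LP =
    LandingPadInst::Create(LPadTy, /*NumReservedClauses=*/1, "lpad", InsertPt);
LP->addClause(TypeInfo);   // catch clause for one typeinfo global
LP->setCleanup(true);      // also run cleanup code on this path
for (unsigned Idx = 0; Idx != LP->getNumClauses(); ++Idx)
  if (LP->isCatch(Idx))
    (void)LP->getClause(Idx);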

A function/module analysis which provides an empty LastRunTrackingInfo.

This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.

static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)

Helper for client passes to set up the analysis usage on behalf of this pass.

An instruction for reading from memory.

const MDOperand & getOperand(unsigned I) const

unsigned getNumOperands() const

Return number of MDNode operands.

Tracking metadata reference owned by Metadata.

This is the common base class for memset/memcpy/memmove.

static MemoryLocation getForDest(const MemIntrinsic *MI)

Return a location representing the destination of a memory set or transfer.

This class represents min/max intrinsics.

static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)

Returns the comparison predicate underlying the intrinsic.

A Module instance is used to store all the information related to an LLVM module.

MDNode * getScopeList() const

An analysis over an "inner" IR unit that provides access to an analysis manager over an "outer" IR uni...

Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.

bool hasNoSignedWrap() const

Test whether this operation is known to never undergo signed overflow, aka the nsw property.

bool hasNoUnsignedWrap() const

Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.

void addIncoming(Value *V, BasicBlock *BB)

Add an incoming value to the end of the PHI list.

op_range incoming_values()

BasicBlock * getIncomingBlock(unsigned i) const

Return incoming basic block number i.

Value * getIncomingValue(unsigned i) const

Return incoming value number i.

unsigned getNumIncomingValues() const

Return the number of incoming edges.

static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
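
For example, merging a value from two predecessors looks roughly like this (BB0, BB1, V0, V1, and InsertPt are assumed to be in scope):

PHINode *PN = PHINode::Create(V0->getType(), /*NumReservedValues=*/2,
                              "merge", InsertPt);
PN->addIncoming(V0, BB0);
PN->addIncoming(V1, BB1);
assert(PN->getNumIncomingValues() == 2 &&
       PN->getIncomingBlock(0) == BB0 && PN->getIncomingValue(1) == V1);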

PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...

static PassRegistry * getPassRegistry()

getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...

In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...

static PoisonValue * get(Type *T)

Static factory methods - Return a 'poison' object of the specified type.

A set of analyses that are preserved following a run of a transformation pass.

static PreservedAnalyses all()

Construct a special preserved set that preserves all passes.

void preserveSet()

Mark an analysis set as preserved.

void preserve()

Mark an analysis as preserved.
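
A typical way a transform reports its result with this class (a sketch; the helper name reportChanges is made up):

static PreservedAnalyses reportChanges(bool Changed) {
  if (!Changed)
    return PreservedAnalyses::all();    // nothing was modified
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();        // assumes the CFG was left intact
  return PA;
}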

An analysis pass based on the new PM to deliver ProfileSummaryInfo.

An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.

Analysis providing profile information.

bool hasProfileSummary() const

Returns true if profile summary is available.

A global registry used in conjunction with static constructors to make pluggable components (like tar...

Return a value (possibly void), from a function.

Value * getReturnValue() const

Convenience accessor. Returns null if there is no return value.

static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)

This class represents a cast from signed integer to floating point.

This class represents the LLVM 'select' instruction.

static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)

This instruction constructs a fixed permutation of two input vectors.

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

bool contains(ConstPtrType Ptr) const

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...

std::pair< const_iterator, bool > insert(const T &V)

insert - Insert an element into the set if it isn't already there.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

reference emplace_back(ArgTypes &&... Args)

void reserve(size_type N)

iterator erase(const_iterator CI)

void append(ItTy in_start, ItTy in_end)

Add the specified range to the end of the SmallVector.

typename SuperClass::iterator iterator

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
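
These containers are frequently paired; for instance, collecting the unique instruction users of a value in a stable order (V is an assumed Value*):

SmallPtrSet<Instruction *, 8> Seen;
SmallVector<Instruction *, 8> Order;
for (User *U : V->users())
  if (auto *I = dyn_cast<Instruction>(U))
    if (Seen.insert(I).second)          // insert() reports "newly inserted"
      Order.push_back(I);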

An instruction for storing to memory.

StringRef - Represent a constant reference to a string, i.e.

TargetFolder - Create constants with target dependent folding.

Analysis pass providing the TargetTransformInfo.

Analysis pass providing the TargetLibraryInfo.

Provides information about what library functions are available for the current target.

bool has(LibFunc F) const

Tests whether a library function is available.

bool getLibFunc(StringRef funcName, LibFunc &F) const

Searches for a particular function name.
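
A common way these two queries are combined before reasoning about a call (TLI and Callee are assumed to be in scope):

LibFunc TheLibFunc;
if (TLI.getLibFunc(Callee->getName(), TheLibFunc) && TLI.has(TheLibFunc)) {
  // The callee is a recognized, available library function; its documented
  // semantics may now be assumed by the transform.
}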

Wrapper pass for TargetTransformInfo.

This pass provides access to the codegen interfaces that are needed for IR-level transformations.

std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const

Targets can implement their own combinations for target-specific intrinsics.

std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const

Can be used to implement target-specific instruction combining.

std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const

Can be used to implement target-specific instruction combining.
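
The shape of such a target hook, sketched for a hypothetical target and intrinsic (MyTTIImpl and Intrinsic::my_target_identity do not exist; they only illustrate the contract of returning a replacement or std::nullopt):

std::optional<Instruction *>
MyTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  switch (II.getIntrinsicID()) {
  case Intrinsic::my_target_identity:   // hypothetical no-op intrinsic
    // Replace the call with its operand; InstCombine erases the dead call.
    return IC.replaceInstUsesWith(II, II.getArgOperand(0));
  default:
    return std::nullopt;                // defer to generic combines
  }
}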

bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const

Query the target whether the specified address space cast from FromAS to ToAS is valid.

Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...

The instances of the Type class are immutable: once they are created, they are never changed.

const fltSemantics & getFltSemantics() const

bool isVectorTy() const

True if this is an instance of VectorType.

static IntegerType * getInt1Ty(LLVMContext &C)

unsigned getPointerAddressSpace() const

Get the address space of this pointer or pointer vector type.

unsigned getScalarSizeInBits() const LLVM_READONLY

If this is a vector type, return the getPrimitiveSizeInBits value for the element type.

bool isStructTy() const

True if this is an instance of StructType.

bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const

Return true if it makes sense to take the size of this type.

bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const

Return true if this is a type whose size is a known multiple of vscale.

static IntegerType * getInt32Ty(LLVMContext &C)

bool isIntegerTy() const

True if this is an instance of IntegerType.

TypeSize getPrimitiveSizeInBits() const LLVM_READONLY

Return the basic size of this type if it is a primitive type.

Type * getScalarType() const

If this is a vector type, return the element type, otherwise return 'this'.

This class represents a cast unsigned integer to floating point.

This function has undefined behavior.

A Use represents the edge between a Value definition and its users.

bool replaceUsesOfWith(Value *From, Value *To)

Replace uses of one Value with another.

Value * getOperand(unsigned i) const

unsigned getNumOperands() const

bool isDroppable() const

A droppable user is a user for which uses can be dropped without affecting correctness and should be ...

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const

This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...

bool hasOneUser() const

Return true if there is exactly one user of this value.

bool hasOneUse() const

Return true if there is exactly one use of this value.

iterator_range< user_iterator > users()

bool hasNUses(unsigned N) const

Return true if this Value has exactly N uses.

const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

LLVMContext & getContext() const

All values hold a context through their type.

uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const

Returns the number of bytes known to be dereferenceable for the pointer value.

StringRef getName() const

Return a constant reference to the value's name.

void takeName(Value *V)

Transfer the name from V to this value.

static VectorType * get(Type *ElementType, ElementCount EC)

This static method is the primary way to construct a VectorType.
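
For instance, constructing a <4 x i32> type from these factories (Ctx is an assumed LLVMContext):

Type *I32 = IntegerType::get(Ctx, 32);
VectorType *V4I32 = VectorType::get(I32, ElementCount::getFixed(4));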

constexpr ScalarTy getFixedValue() const

constexpr bool isZero() const

An efficient, type-erasing, non-owning reference to a callable.

Type * getIndexedType() const

const ParentTy * getParent() const

reverse_self_iterator getReverseIterator()

self_iterator getIterator()

This class implements an extremely fast bulk output stream that can only output to a stream.

A raw_ostream that writes to an std::string.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

bool isNoFPClassCompatibleType(Type *Ty)

Returns true if this is a type legal for the 'nofpclass' attribute.

@ C

The default llvm calling convention, compatible with C.

Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})

Look up the Function declaration of the intrinsic id in the Module M.

cst_pred_ty< is_all_ones > m_AllOnes()

Match an integer or vector with all bits set.

class_match< PoisonValue > m_Poison()

Match an arbitrary poison constant.

BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)

PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)

Matches GEP with i8 source element type.

BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)

class_match< BinaryOperator > m_BinOp()

Match an arbitrary binary operation and ignore it.

CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)

class_match< Constant > m_Constant()

Match an arbitrary Constant and ignore it.

CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)

Matches Trunc.

BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)

br_match m_UnconditionalBr(BasicBlock *&Succ)

specific_intval< false > m_SpecificInt(const APInt &V)

Match a specific integer value or vector with all elements equal to the value.

bool match(Val *V, const Pattern &P)
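
A representative use of match() with the matchers listed on this page (V is an assumed Value*; X and C are bound on success):

Value *X;
const APInt *C;
// True when V is a single-use 'add X, C' with a constant-int (or splat) RHS.
bool IsAddOfConst = match(V, m_OneUse(m_Add(m_Value(X), m_APInt(C))));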

BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)

Matches integer division operations.

bind_ty< Instruction > m_Instruction(Instruction *&I)

Match an instruction, capturing it if we match.

specificval_ty m_Specific(const Value *V)

Match if we have a specific specified value.

DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)

constantexpr_match m_ConstantExpr()

Match a constant expression or a constant that contains a constant expression.

BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)

Matches right shift operations (lshr or ashr).

cst_pred_ty< is_nonnegative > m_NonNegative()

Match an integer or vector of non-negative values.

class_match< ConstantInt > m_ConstantInt()

Match an arbitrary ConstantInt and ignore it.

ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)

Matches SelectInst.

match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)

Combine two pattern matchers matching L && R.

BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)

apint_match m_APIntAllowPoison(const APInt *&Res)

Match APInt while allowing poison in splat vector constants.

OneUse_match< T > m_OneUse(const T &SubPattern)

auto m_LogicalOr()

Matches L || R where L and R are arbitrary values.

BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)

Matches a 'Neg' as 'sub 0, V'.

TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)

Matches ShuffleVectorInst independently of mask value.

match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()

Match an arbitrary immediate Constant and ignore it.

SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)

CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)

Matches ZExt.

BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)

brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)

match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)

Match either "add" or "or disjoint".

CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)

CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)

Matches BitCast.

match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)

Match either "sext" or "zext nneg".

BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)

apint_match m_APInt(const APInt *&Res)

Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.

class_match< Value > m_Value()

Match an arbitrary value and ignore it.

AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)

Matches a BinaryOperator with LHS and RHS in either order.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)

CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)

BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)

CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)

match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)

BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)

Matches shift operations.

BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)

cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()

Match a floating-point non-zero.

m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)

auto m_LogicalAnd()

Matches L && R where L and R are arbitrary values.

match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)

auto m_Undef()

Match an arbitrary undef constant.

BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)

Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.

BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)

CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)

Matches SExt.

is_zero m_Zero()

Match any null constant or a vector with all elements equal to 0.

CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)

Matches PtrToInt.

BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)

match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)

Combine two pattern matchers matching L || R.

initializer< Ty > init(const Ty &Val)

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)

detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)

zip iterator for two or more iteratable types.

void stable_sort(R &&Range)

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)

Given operands for a GetElementPtrInst, fold the result or return null.

bool succ_empty(const Instruction *I)

Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)

Given an operand for a Freeze, see if we can fold the result.

FunctionPass * createInstructionCombiningPass()

bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)

Like isSafeToSpeculativelyExecute, but don't use information from the instruction's non-constant operands; intended for when those operands are about to be replaced.

std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)

Remove all instructions from a basic block other than its terminator and any present EH pad instructi...

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...

void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)

Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...

void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)

Finds the debug info intrinsics describing a value.

void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)

Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...

auto successors(const MachineBasicBlock *BB)

bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)

Return true if this is a call to an allocation function that does not have side effects that we are r...

std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)

If a function is part of an allocation family (e.g.

Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)

Try to turn a call to @llvm.objectsize into an integer value of the given Type.

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)

Like simplifyInstruction but the operands of I are replaced with NewOps.

void append_range(Container &C, Range &&R)

Wrapper function to append range R to container C.

const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)

This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....

Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)

Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.

iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)

Make a range that does early increment to allow mutation of the underlying range without disrupting i...

gep_type_iterator gep_type_end(const User *GEP)

Value * getReallocatedOperand(const CallBase *CB)

If this is a call to a realloc function, return the reallocated operand.

bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)

Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...

bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)

If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...

int countr_zero(T Val)

Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.

Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)

Given operands for an Add, fold the result or return null.
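
Typical call pattern, asking InstSimplify before materializing an add (LHS, RHS, CtxI, SQ, and Builder are assumed to be in scope):

if (Value *V = simplifyAddInst(LHS, RHS, /*IsNSW=*/false, /*IsNUW=*/false,
                               SQ.getWithInstruction(CtxI)))
  return V;                        // folds to an existing value
return Builder.CreateAdd(LHS, RHS); // otherwise create the instruction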

Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)

ConstantFoldConstant - Fold the constant using the specified DataLayout.

constexpr bool has_single_bit(T Value) noexcept

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)

Return true if the result produced by the instruction is not used, and the instruction will return.
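
A minimal dead-instruction sweep built on this predicate (BB and TLI are assumed; make_early_inc_range, listed above, keeps the iteration valid while erasing):

for (Instruction &I : llvm::make_early_inc_range(BB))
  if (isInstructionTriviallyDead(&I, &TLI))
    I.eraseFromParent();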

bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)

Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...

Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)

Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...

constexpr unsigned MaxAnalysisRecursionDepth

auto reverse(ContainerTy &&C)

void sort(IteratorTy Start, IteratorTy End)

FPClassTest

Floating-point class tests, supported by 'is_fpclass' intrinsic.

bool LowerDbgDeclare(Function &F)

Lowers llvm.dbg.declare intrinsics into an appropriate set of llvm.dbg.value intrinsics.

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

void report_fatal_error(Error Err, bool gen_crash_diag=true)

Report a serious error, calling any installed error handler.

void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)

Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value that has an associated llvm....

Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)

Attempt to constant fold a cast with the specified operand.

bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)

canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...

EHPersonality classifyEHPersonality(const Value *Pers)

See if the given exception handling personality function is one that we understand.

Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)

Given operands for an ExtractValueInst, fold the result or return null.

Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)

Attempt to constant fold a binary operation with the specified operands.

bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)

Point debug users of From to To or salvage them.

bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)

Return true if the given value is known to be non-zero when defined.

constexpr int PoisonMaskElem

auto drop_end(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the last N elements excluded.

Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)

Given operands for a BinaryOperator, fold the result or return null.

@ Or

Bitwise or logical OR of integers.

DWARFExpression::Operation Op

Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)

ConstantFoldInstruction - Try to constant fold the specified instruction.
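
Typical use: fold an instruction whose operands have all become constant, then drop it (I, DL, and TLI are assumed to be in scope):

if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
  I->replaceAllUsesWith(C);
  I->eraseFromParent();
}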

bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)

Return true if this function can prove that V does not have undef bits and is never poison.

Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)

If this is a call to a free function, return the freed operand.

constexpr unsigned BitWidth

bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)

Return true if this function can prove that the instruction I will always transfer execution to one o...

gep_type_iterator gep_type_begin(const User *GEP)

auto predecessors(const MachineBasicBlock *BB)

bool is_contained(R &&Range, const E &Element)

Returns true if Element is found in Range.

bool equal(L &&LRange, R &&RRange)

Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.

bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)

Returns true if the given value is known to be non-negative.

static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)

Filter the DbgRecord range to DbgVariableRecord types only and downcast.

void initializeInstCombine(PassRegistry &)

Initialize all passes linked into the InstCombine library.

void initializeInstructionCombiningPassPass(PassRegistry &)

Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)

std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)

Return true if RHS is known to be implied true by LHS.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

static unsigned int semanticsPrecision(const fltSemantics &)

unsigned countMinLeadingOnes() const

Returns the minimum number of leading one bits.

unsigned getBitWidth() const

Get the bit width of this value.

unsigned countMinLeadingZeros() const

Returns the minimum number of leading zero bits.

A CRTP mix-in to automatically provide informational APIs needed for passes.

SimplifyQuery getWithInstruction(const Instruction *I) const

SimplifyQuery getWithoutUndef() const