LLVM: lib/Transforms/InstCombine/InstCombineCalls.cpp Source File

//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

#define DEBUG_TYPE "instcombine"

using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");

90 "instcombine-guard-widening-window",

92 cl::desc("How wide an instruction window to bypass looking for "

93 "another guard"));

94

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}


/// Recognize a memcpy/memmove from a trivially otherwise unused alloca.
/// TODO: This should probably be integrated with visitAllocSites, but that
/// requires a deeper change to allow either unread or unwritten objects.
static bool hasUndefSource(AnyMemTransferInst *MI) {
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src)) {
    if (!Src->hasOneUse())
      return false;
    Src = cast<Instruction>(Src)->getOperand(0);
  }
  return isa<AllocaInst>(Src) && Src->hasOneUse();
}

117

Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  MaybeAlign CopyDstAlign = MI->getDestAlign();
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  MaybeAlign CopySrcAlign = MI->getSourceAlign();
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

132

133

134

135

136 if (isModSet(AA->getModRefInfoMask(MI->getDest()))) {

137

139 return MI;

140 }

141

142

143

145

147 return MI;

148 }

149

150

151

153 if (!MemOpLength) return nullptr;

154

155

156

157

158

160 assert(Size && "0-sized memory transferring should be removed already.");

161

163 return nullptr;

164

165

166

167

168

169 if (MI->isAtomic())

170 if (*CopyDstAlign < Size || *CopySrcAlign < Size)

171 return nullptr;

172

173

175

176

177

178 AAMDNodes AACopyMD = MI->getAAMetadata().adjustForAccess(Size);

179

180 Value *Src = MI->getArgOperand(1);

181 Value *Dest = MI->getArgOperand(0);

183

184 L->setAlignment(*CopySrcAlign);

185 L->setAAMetadata(AACopyMD);

186 MDNode *LoopMemParallelMD =

187 MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);

188 if (LoopMemParallelMD)

189 L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);

190 MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);

191 if (AccessGroupMD)

192 L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

193

195

198 if (LoopMemParallelMD)

199 S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);

200 if (AccessGroupMD)

201 S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

203

205

206 L->setVolatile(MT->isVolatile());

208 }

209 if (MI->isAtomic()) {

210

213 }

214

215

217 return MI;

218}
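
// Illustrative sketch (not part of the upstream source): with a small,
// constant, power-of-two length and adequate alignment, the transfer above is
// rewritten as a single integer load/store pair, e.g. for a 4-byte memcpy:
//
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 4, i1 false)
//     =>
//   %v = load i32, ptr %src, align 1
//   store i32 %v, ptr %dst, align 1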

219

Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

228

229

230

231

232 if (isModSet(AA->getModRefInfoMask(MI->getDest()))) {

233

235 return MI;

236 }

237

238

239

240

242

244 return MI;

245 }

246

247

251 return nullptr;

253 assert(Len && "0-sized memory setting should be removed already.");

254 const Align Alignment = MI->getDestAlign().valueOrOne();

255

256

257

258

259

260 if (MI->isAtomic() && Alignment < Len)

261 return nullptr;

262

263

265 Value *Dest = MI->getDest();

266

267

268 Constant *FillVal = ConstantInt::get(

274 DbgAssign->replaceVariableLocationOp(FillC, FillVal);

275 }

276

278 if (MI->isAtomic())

280

281

283 return MI;

284 }

285

286 return nullptr;

287}
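
// Illustrative sketch (not part of the upstream source): a memset of constant
// power-of-two length <= 8 becomes a store of the fill byte splatted across
// an integer of that width, e.g.:
//
//   call void @llvm.memset.p0.i64(ptr %p, i8 7, i64 4, i1 false)
//     =>
//   store i32 117901063, ptr %p, align 1   ; 0x07070707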

288

289

290

292 Value *LoadPtr = II.getArgOperand(0);

293 const Align Alignment = II.getParamAlign(0).valueOrOne();

294

295

296

298 LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,

299 "unmaskedload");

300 L->copyMetadata(II);

301 return L;

302 }

303

304

305

307 II.getDataLayout(), &II, &AC)) {

308 LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,

309 "unmaskedload");

311 return Builder.CreateSelect(II.getArgOperand(1), LI, II.getArgOperand(2));

312 }

313

314 return nullptr;

315}
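
// Illustrative examples (assumed IR, not from this file): an all-true mask
// reduces a masked load to an ordinary load, and a known-dereferenceable
// pointer lets the mask be folded into a select over the passthrough:
//
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
//            <4 x i1> splat (i1 true), <4 x i32> %pt)
//     =>
//   %v = load <4 x i32>, ptr %p, align 4
//
//   ; with %p known dereferenceable:
//   %l = load <4 x i32>, ptr %p, align 4
//   %v = select <4 x i1> %m, <4 x i32> %l, <4 x i32> %pt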

316

317

318

319

321 Value *StorePtr = II.getArgOperand(1);

322 Align Alignment = II.getParamAlign(1).valueOrOne();

324 if (!ConstMask)

325 return nullptr;

326

327

330

331

333 StoreInst *S =

334 new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);

336 return S;

337 }

338

340 return nullptr;

341

342

344 APInt PoisonElts(DemandedElts.getBitWidth(), 0);

346 PoisonElts))

348

349 return nullptr;

350}

351

352

353

354

355

356

357

360 if (!ConstMask)

361 return nullptr;

362

363

364

365

366 if (ConstMask->isAllOnesValue())

367 if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {

369 const Align Alignment = II.getParamAlign(0).valueOrOne();

370 LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,

371 Alignment, "load.scalar");

373 Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");

375 }

376

377 return nullptr;

378}
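
// Illustrative sketch (assumed IR): a gather through a splat of one pointer
// with an all-true mask is a broadcast scalar load:
//
//   %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %splat.p,
//            i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
//     =>
//   %s = load i32, ptr %p, align 4
//   %g = vector splat of %s (insertelement + shufflevector)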

379

380

381

382

383

384

387 if (!ConstMask)

388 return nullptr;

389

390

393

394

395 if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {

396

397 if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {

399 Align Alignment = II.getParamAlign(1).valueOrOne();

400 StoreInst *S = new StoreInst(SplatValue, SplatPtr, false,

401 Alignment);

403 return S;

404 }

405 }

406

407

408 if (ConstMask->isAllOnesValue()) {

409 Align Alignment = II.getParamAlign(1).valueOrOne();

411 ElementCount VF = WideLoadTy->getElementCount();

415 Builder.CreateExtractElement(II.getArgOperand(0), LastLane);

416 StoreInst *S =

417 new StoreInst(Extract, SplatPtr, false, Alignment);

419 return S;

420 }

421 }

423 return nullptr;

424

425

427 APInt PoisonElts(DemandedElts.getBitWidth(), 0);

429 PoisonElts))

432 PoisonElts))

434

435 return nullptr;

436}
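
// Illustrative sketch (assumed IR): a scatter of a value vector through a
// splat pointer with an all-true mask stores only the last lane, since later
// lanes overwrite earlier ones:
//
//   call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %v,
//            <4 x ptr> %splat.p, i32 4, <4 x i1> splat (i1 true))
//     =>
//   %last = extractelement <4 x i32> %v, i64 3
//   store i32 %last, ptr %p, align 4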

437

438

439

440

441

442

443

444

445

448 auto *Arg = II.getArgOperand(0);

449 auto *StrippedArg = Arg->stripPointerCasts();

450 auto *StrippedInvariantGroupsArg = StrippedArg;

452 if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&

453 Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)

454 break;

455 StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();

456 }

457 if (StrippedArg == StrippedInvariantGroupsArg)

458 return nullptr;

459

460 Value *Result = nullptr;

461

462 if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)

464 else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)

466 else

468 "simplifyInvariantGroupIntrinsic only handles launder and strip");

469 if (Result->getType()->getPointerAddressSpace() !=

470 II.getType()->getPointerAddressSpace())

472

474}

475

477 assert((II.getIntrinsicID() == Intrinsic::cttz ||

478 II.getIntrinsicID() == Intrinsic::ctlz) &&

479 "Expected cttz or ctlz intrinsic");

480 bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;

481 Value *Op0 = II.getArgOperand(0);

482 Value *Op1 = II.getArgOperand(1);

484

485

487 Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;

491 }

492

493 if (II.getType()->isIntOrIntVectorTy(1)) {

494

497

498

499 assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");

501 }

502

503

506 II.dropUBImplyingAttrsAndMetadata();

508 }

509

511

512 if (IsTZ) {

513

516

517

520

521

524 auto *CttzZext =

527 }

528

529

530

536 }

537

538

539

544

547

548

551 Value *ConstCttz =

553 return BinaryOperator::CreateAdd(ConstCttz, X);

554 }

555

556

559 Value *ConstCttz =

561 return BinaryOperator::CreateSub(ConstCttz, X);

562 }

563

564

567 ConstantInt::get(II.getType(), II.getType()->getScalarSizeInBits());

568 return BinaryOperator::CreateSub(Width, X);

569 }

570 } else {

571

574 Value *ConstCtlz =

576 return BinaryOperator::CreateAdd(ConstCtlz, X);

577 }

578

579

582 Value *ConstCtlz =

584 return BinaryOperator::CreateSub(ConstCtlz, X);

585 }

586

587

591 Type *Ty = II.getType();

592 unsigned BitWidth = Ty->getScalarSizeInBits();

597 }

598 }

599

600

601

603 if (IsTZ)

606 ConstantInt::get(R->getType(), R->getType()->getScalarSizeInBits() - 1),

607 R);

610 return BO;

611 }

612

614

615

620

621

622

623

624

625 if (PossibleZeros == DefiniteZeros) {

626 auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);

628 }

629

630

631

632

637 }

638

639

641 if (BitWidth != 1 && II.hasRetAttr(Attribute::Range) &&

642 II.getMetadata(LLVMContext::MD_range)) {

645 II.addRangeRetAttr(Range);

646 return &II;

647 }

648

649 return nullptr;

650}
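
// Illustrative examples of the cttz/ctlz folds above (not from this file):
//
//   cttz(shl i32 1, %x)   -> %x   ; 1 << x has exactly x trailing zeros
//   ctlz(lshr i32 -1, %x) -> %x   ; 0xFFFFFFFF >> x has x leading zeros
//
// and when known bits pin every possibly-set bit, the call folds to a
// constant via the range/known-bits reasoning above.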

651

653 assert(II.getIntrinsicID() == Intrinsic::ctpop &&

654 "Expected ctpop intrinsic");

655 Type *Ty = II.getType();

656 unsigned BitWidth = Ty->getScalarSizeInBits();

657 Value *Op0 = II.getArgOperand(0);

659

660

661

664

665

668 X == Y)

670

671

678 }

679

680

686 }

687

688

689

693 }

694

697

698

699

700

701

702

703 if ((~Known.Zero).isPowerOf2())

704 return BinaryOperator::CreateLShr(

705 Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

706

707

708

709

714 Ty);

715

716

719 II.getRange().value_or(ConstantRange::getFull(BitWidth));

720

723

727

730

731 if (Range != OldRange) {

732 II.addRangeRetAttr(Range);

733 return &II;

734 }

735 }

736

737 return nullptr;

738}
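
// Illustrative example (assumed known-bits facts): if %x can only be 0 or 8,
// i.e. at most the single bit 3 may be set, the popcount is just that bit
// shifted down:
//
//   %r = call i32 @llvm.ctpop.i32(i32 %x)
//     =>
//   %r = lshr i32 %x, 3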

739

740

741

743 bool IsExtension) {

744

  if (!C)

747 return nullptr;

748

750 unsigned NumIndexes = RetTy->getNumElements();

751

752

753 if (!RetTy->getElementType()->isIntegerTy(8) ||

754 (NumIndexes != 8 && NumIndexes != 16))

755 return nullptr;

756

757

758

759 unsigned int StartIndex = (unsigned)IsExtension;

760 auto *SourceTy =

762

763

764

765 unsigned NumElementsPerSource = SourceTy->getNumElements();

766

767

768

769

770

771 if (NumIndexes > NumElementsPerSource)

772 return nullptr;

773

774

775

776 unsigned int NumSourceOperands = II.arg_size() - 1 - (unsigned)IsExtension;

777

778

779

780

784

785 int Indexes[16];

786 for (unsigned I = 0; I < NumIndexes; ++I) {

787 Constant *COp = C->getAggregateElement(I);

788

790 return nullptr;

791

793 Indexes[I] = -1;

794 continue;

795 }

796

798

799

800 unsigned SourceOperandIndex = Index / NumElementsPerSource;

801

802 unsigned SourceOperandElementIndex = Index % NumElementsPerSource;

803

804 Value *SourceOperand;

805 if (SourceOperandIndex >= NumSourceOperands) {

806

807

808 SourceOperandIndex = NumSourceOperands;

809 if (IsExtension) {

810

811

812 SourceOperand = II.getArgOperand(0);

813 SourceOperandElementIndex = I;

814 } else {

815

816

818 SourceOperandElementIndex = 0;

819 }

820 } else {

821 SourceOperand = II.getArgOperand(SourceOperandIndex + StartIndex);

822 }

823

824

825

826

827

829 NumElementsPerSource)

830 return nullptr;

831

832

833

834 unsigned NumSlots = ValueToShuffleSlot.size();

835

836

837 if (NumSlots == 2 && !ValueToShuffleSlot.contains(SourceOperand))

838 return nullptr;

839

840 auto [It, Inserted] =

841 ValueToShuffleSlot.try_emplace(SourceOperand, NumSlots);

842 if (Inserted)

843 ShuffleOperands[It->getSecond()] = SourceOperand;

844

845 unsigned RemappedIndex =

846 (It->getSecond() * NumElementsPerSource) + SourceOperandElementIndex;

847 Indexes[I] = RemappedIndex;

848 }

849

851 ShuffleOperands[0], ShuffleOperands[1], ArrayRef(Indexes, NumIndexes));

853}

854

855

856

858 unsigned NumOperands) {

859 assert(I.arg_size() >= NumOperands && "Not enough operands");

860 assert(E.arg_size() >= NumOperands && "Not enough operands");

861 for (unsigned i = 0; i < NumOperands; i++)

862 if (I.getArgOperand(i) != E.getArgOperand(i))

863 return false;

864 return true;

865}

866

867

868

869

870

871

872

873

874

875

876static bool

878 std::function<bool(const IntrinsicInst &)> IsStart) {

879

880

881

883 for (; BI != BE; ++BI) {

885 if (I->isDebugOrPseudoInst() ||

887 continue;

888 if (IsStart(*I)) {

892 return true;

893 }

894

895 continue;

896 }

897 }

898 break;

899 }

900

901 return false;

902}

903

906

907

908 return II.getIntrinsicID() == Intrinsic::vastart ||

909 (II.getIntrinsicID() == Intrinsic::vacopy &&

910 I.getArgOperand(0) != II.getArgOperand(1));

911 });

912 return nullptr;

913}

914

916 assert(Call.arg_size() > 1 && "Need at least 2 args to swap");

917 Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);

919 Call.setArgOperand(0, Arg1);

920 Call.setArgOperand(1, Arg0);

921 return &Call;

922 }

923 return nullptr;

924}

925

926

927

935

937InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {

939 Value *OperationResult = nullptr;

944

945

946 for (User *U : WO->users()) {

948 continue;

949

950 for (auto &AssumeVH : AC.assumptionsFor(U)) {

951 if (!AssumeVH)

952 continue;

955 continue;

957 true))

958 continue;

961 Result->takeName(WO);

964 Inst->setHasNoSignedWrap();

965 else

966 Inst->setHasNoUnsignedWrap();

967 }

970 }

971 }

972

973 return nullptr;

974}

975

977 Ty = Ty->getScalarType();

978 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;

979}

980

982 Ty = Ty->getScalarType();

983 return F.getDenormalMode(Ty->getFltSemantics()).inputsAreZero();

984}

985

986

987

988

991 switch (static_cast<unsigned>(Mask)) {

995 break;

999 break;

1003 break;

1007 break;

1011 break;

1015 break;

1019 break;

1023 break;

1027 break;

1031 break;

1035 break;

1039 break;

1040 default:

1041 break;

1042 }

1043

1045}

1046

1048 Value *Src0 = II.getArgOperand(0);

1049 Value *Src1 = II.getArgOperand(1);

1055 const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

1056

1057 const bool IsStrict =

1058 II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);

1059

1062

1063

1064 II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));

1066 }

1067

1072 }

1073

1074 if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&

1075 (IsOrdered || IsUnordered) && !IsStrict) {

1076

1077

1078

1079

1083 if (OrderedInvertedMask == fcInf)

1085

1086 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Src0);

1087 Value *CmpInf = Builder.CreateFCmp(Pred, Fabs, Inf);

1090 }

1091

1093 (IsOrdered || IsUnordered) && !IsStrict) {

1094

1095

1096

1097

1100 Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)

1101 : Builder.CreateFCmpOEQ(Src0, Inf);

1102

1105 }

1106

1107 if ((OrderedInvertedMask == fcPosInf || OrderedInvertedMask == fcNegInf) &&

1108 (IsOrdered || IsUnordered) && !IsStrict) {

1109

1110

1111

1112

1114 OrderedInvertedMask == fcNegInf);

1115 Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)

1116 : Builder.CreateFCmpONE(Src0, Inf);

1119 }

1120

1121 if (Mask == fcNan && !IsStrict) {

1122

1123

1128 }

1129

1131

1136 }

1137

1139

1140

1141

1142

1143

1144

1145

1146

1147

1148

1149

1150

1151

1152

1153 if (!IsStrict && (IsOrdered || IsUnordered) &&

1158

1161 Src0, Zero);

1162

1165 }

1166

1168

1169

1170

1171

1173 II.setArgOperand(

1175 return &II;

1176 }

1177

1178

1179

1180

1183

1184 return nullptr;

1185}

1186

1190 return false;

1192 return true;

1193

1197

1198 return std::nullopt;

1199}

1200

  if (std::optional<bool> Sign = getKnownSign(Op, SQ))

1204 return Sign;

1205

1209

1210 return std::nullopt;

1211}

1212

1213

  std::optional<bool> Known1 = getKnownSign(Op1, SQ);
  if (!Known1)
    return false;
  std::optional<bool> Known0 = getKnownSign(Op0, SQ);

1220 if (!Known0)

1221 return false;

1222 return *Known0 == *Known1;

1223}

1224

1225

1226

1230 assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||

1231 MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&

1232 "Expected a min or max intrinsic");

1233

1234

1235 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);

1237 const APInt *C0, *C1;

1240 return nullptr;

1241

1242

1243 bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;

  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))

1247 return nullptr;

1248

1249

1250

1251 bool Overflow;

1253 IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);

1254 assert(!Overflow && "Expected simplify of min/max");

1255

1256

1257

1258 Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);

1259 Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);

1260 return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))

1261 : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));

1262}
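
// Illustrative example (assumed IR): with nsw on the add, the constant is
// re-associated through the min/max:
//
//   %a = add nsw i32 %x, 16
//   %m = call i32 @llvm.smax.i32(i32 %a, i32 24)
//     =>
//   %t = call i32 @llvm.smax.i32(i32 %x, i32 8)   ; 8 == 24 - 16
//   %m = add nsw i32 %t, 16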

1263

1266

1267

1268

1269

1271 BinaryOperator *AddSub;

1272 const APInt *MinValue, *MaxValue;

1275 return nullptr;

1276 } else if (match(&MinMax1,

1279 return nullptr;

1280 } else

1281 return nullptr;

1282

1283

1284

1285 if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)

1286 return nullptr;

1287

1288 unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;

1289

1290

1292 return nullptr;

1293

1294

1296 return nullptr;

1297

1298

1300

1302 if (AddSub->getOpcode() == Instruction::Add)

1303 IntrinsicID = Intrinsic::sadd_sat;

1304 else if (AddSub->getOpcode() == Instruction::Sub)

1305 IntrinsicID = Intrinsic::ssub_sat;

1306 else

1307 return nullptr;

1308

1309

1310

1313 return nullptr;

1314

1315

1318 Value *Sat = Builder.CreateIntrinsic(IntrinsicID, NewTy, {AT, BT});

1320}
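
// Illustrative example (assumed IR, with %x and %y known to fit in i8): an
// i32 add clamped to [-128, 127] is a saturating i8 add, sign-extended back:
//
//   %a  = add i32 %x, %y
//   %mx = call i32 @llvm.smax.i32(i32 %a, i32 -128)
//   %r  = call i32 @llvm.smin.i32(i32 %mx, i32 127)
//     =>
//   %xt = trunc i32 %x to i8
//   %yt = trunc i32 %y to i8
//   %s  = call i8 @llvm.sadd.sat.i8(i8 %xt, i8 %yt)
//   %r  = sext i8 %s to i32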

1321

1322

1323

1324

1325

1328 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

1330 const APInt *C0, *C1;

1332 return nullptr;

1333

1335 switch (II->getIntrinsicID()) {

1336 case Intrinsic::smax:

1339 break;

1340 case Intrinsic::smin:

1343 break;

1344 case Intrinsic::umax:

1347 break;

1348 case Intrinsic::umin:

1351 break;

1352 default:

1354 }

1356 return nullptr;

1357

1358

1359

1360 Value *Cmp = Builder.CreateICmp(Pred, X, I1);

1362}

1363

1364

1365

  if (!LHS)

1372 return nullptr;

1373

1377 return nullptr;

1378

1379

1380

1381

1382

1384 if (InnerMinMaxID != MinMaxID &&

1385 !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||

1386 (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&

1388 return nullptr;

1389

1391 Value *CondC = Builder.CreateICmp(Pred, C0, C1);

1392 Value *NewC = Builder.CreateSelect(CondC, C0, C1);

1393 return Builder.CreateIntrinsic(InnerMinMaxID, II->getType(),

1394 {LHS->getArgOperand(0), NewC});

1395}

1396

1397

1398

1402

1410 return nullptr;

1411

1412

1415 if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||

1417 return nullptr;

1418

1419

1421 MinMaxID, II->getType());

1422 Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);

1425}

1426

1427

1429

  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||
      (!LHS->hasOneUse() && !RHS->hasOneUse()))

1436 return nullptr;

1437

1438 Value *A = LHS->getArgOperand(0);

1439 Value *B = LHS->getArgOperand(1);

1440 Value *C = RHS->getArgOperand(0);

1441 Value *D = RHS->getArgOperand(1);

1442

1443

1444 Value *MinMaxOp = nullptr;

1445 Value *ThirdOp = nullptr;

1446 if (LHS->hasOneUse()) {

1447

1448

1449 if (D == A || C == A) {

1450

1451

1452 MinMaxOp = RHS;

1453 ThirdOp = B;

1454 } else if (D == B || C == B) {

1455

1456

1457 MinMaxOp = RHS;

1458 ThirdOp = A;

1459 }

1460 } else {

1461 assert(RHS->hasOneUse() && "Expected one-use operand");

1462

1463 if (D == A || D == B) {

1464

1465

1466 MinMaxOp = LHS;

1467 ThirdOp = C;

1468 } else if (C == A || C == B) {

1469

1470

1471 MinMaxOp = LHS;

1472 ThirdOp = D;

1473 }

1474 }

1475

1476 if (!MinMaxOp || !ThirdOp)

1477 return nullptr;

1478

1483}

1484

1485

1486

1490 II->getCalledFunction()->isSpeculatable())

1491 return nullptr;

1492

    return isa<Constant>(Arg.get()) ||

1498 isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),

1499 Arg.getOperandNo(), nullptr);

1500 });

1501 if (!NonConstArg ||

1503 return nullptr;

1504

1505

1506

1508 return nullptr;

1509

1510

1512 Type *SrcTy = X->getType();

1513 for (Use &Arg : II->args()) {

1517 else if (match(&Arg,

1519 X->getType() == SrcTy)

1522

1526 else

1527 return nullptr;

1528 } else

1529 return nullptr;

1530 }

1531

1532

1534

1535

1538 Value *NewIntrinsic =

1539 Builder.CreateIntrinsic(ResTy, II->getIntrinsicID(), NewArgs, FPI);

1541}

1542

1543

1544

1547 return nullptr;

1548

1549

1550

1552 return match(V, m_OneUse(m_VecReverse(m_Value())));

1553 }))

1554 return nullptr;

1555

1559 for (Use &Arg : II->args()) {

1561 Arg.getOperandNo(), nullptr))

1569 else

1570 return nullptr;

1571 }

1572

1573

1576 II->getType(), II->getIntrinsicID(), NewArgs, FPI);

1577 return Builder.CreateVectorReverse(NewIntrinsic);

1578}

1579

1580

1581

1582

1583template <Intrinsic::ID IntrID>

1586 static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,

1587 "This helper only supports BSWAP and BITREVERSE intrinsics");

1588

1590

1591

1594 Value *OldReorderX, *OldReorderY;

1596

1597

1598

1599

1600

1604 }

1605

1607 Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);

1609 }

1610

1612 Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);

1614 }

1615 }

1616 return nullptr;

1617}

1618

1619

1620

1622 switch (IID) {

1623 case Intrinsic::smax:

1624 case Intrinsic::smin:

1625 case Intrinsic::umax:

1626 case Intrinsic::umin:

1627 case Intrinsic::maximum:

1628 case Intrinsic::minimum:

1629 case Intrinsic::maximumnum:

1630 case Intrinsic::minimumnum:

1631 case Intrinsic::maxnum:

1632 case Intrinsic::minnum:

1633 return true;

1634 default:

1635 return false;

1636 }

1637}

1638

1639

1640

1641

1642

1643

1648

1649

1650

1651 auto IID = II->getIntrinsicID();

1655 return nullptr;

1656

1657 auto *InvariantBinaryInst =

1661 return InvariantBinaryInst;

1662}

1663

1665 if (!CanReorderLanes)

1666 return nullptr;

1667

1670 return V;

1671

1676 return nullptr;

1677

1678 int Sz = Mask.size();

1680 for (int Idx : Mask) {

1682 return nullptr;

1683 UsedIndices.set(Idx);

1684 }

1685

1686

1687

1688 return UsedIndices.all() ? V : nullptr;

1689}

1690

1691

1692

1693

1694

1695template <Intrinsic::ID IntrID>

1700 static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,

1701 "This helper only supports cttz and ctlz intrinsics");

1702

1704 Value *ZeroUndef;

1707 return nullptr;

1708

1709 unsigned BitWidth = I1->getType()->getScalarSizeInBits();

1710 auto LessBitWidth = [BitWidth](auto &C) { return C.ult(BitWidth); };

1712

1713

1714 return nullptr;

1715

1716 Type *Ty = I1->getType();

1718 IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,

1719 IntrID == Intrinsic::cttz

1720 ? ConstantInt::get(Ty, 1)

1723 return Builder.CreateBinaryIntrinsic(

1724 IntrID, Builder.CreateOr(CtOp, NewConst),

1726}
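
// Illustrative example (assumed IR): clamping a trailing-zero count folds
// into the count by seeding a set bit at the clamp position; the OR result is
// never zero, so zero-is-poison can be set:
//
//   %c = call i32 @llvm.cttz.i32(i32 %x, i1 false)
//   %r = call i32 @llvm.umin.i32(i32 %c, i32 6)
//     =>
//   %o = or i32 %x, 64                            ; 1 << 6
//   %r = call i32 @llvm.cttz.i32(i32 %o, i1 true)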

1727

1728

1729

1732 switch (ROp) {

1733 case Intrinsic::umax:

1734 case Intrinsic::umin:

1735 if (HasNUW && LOp == Instruction::Add)

1736 return true;

1737 if (HasNUW && LOp == Instruction::Shl)

1738 return true;

1739 return false;

1740 case Intrinsic::smax:

1741 case Intrinsic::smin:

1742 return HasNSW && LOp == Instruction::Add;

1743 default:

1744 return false;

1745 }

1746}

1747

1748

1749

1750

1754 Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);

1756

1759

1760 if (!Op0 || !Op1)

1761 return nullptr;

1762

1764 return nullptr;

1765

1767 return nullptr;

1768

1773

1775 return nullptr;

1776

1781

1782

1783

1785 if (A == D || B == C)

1787 else

1788 return nullptr;

1789 }

1790

1792 if (A == C) {

1793 Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, B, D);

1794 NewBinop =

1796 } else if (B == D) {

1797 Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, A, C);

1798 NewBinop =

1800 } else {

1801 return nullptr;

1802 }

1803

1806

1807 return NewBinop;

1808}

1809

1810

1811

1812

1814

1815

1819 SQ.getWithInstruction(&CI)))

1821 }

1822

1825

1826

1827

1830 return &CI;

1831 }

1832

1834 if (II)

1835 return visitCallBase(CI);

1836

1837

1838

1840 if (auto NumBytes = MI->getLengthInBytes()) {

1841

1842 if (NumBytes->isZero())

1844

1845

1846

1847 if (MI->isAtomic() &&

1848 (NumBytes->isNegative() ||

1849 (NumBytes->getZExtValue() % MI->getElementSizeInBytes() != 0))) {

1851 assert(MI->getType()->isVoidTy() &&

1852 "non void atomic unordered mem intrinsic");

1854 }

1855 }

1856

1857

1858 if (MI->isVolatile())

1859 return nullptr;

1860

1862

1863 if (MTI->getSource() == MTI->getDest())

1865 }

1866

1867 auto IsPointerUndefined = [MI](Value *Ptr) {

1870 MI->getFunction(),

1872 };

1873 bool SrcIsUndefined = false;

1874

1875

1878 return I;

1879 SrcIsUndefined = IsPointerUndefined(MTI->getRawSource());

1882 return I;

1883 }

1884

1885

1886 if (SrcIsUndefined || IsPointerUndefined(MI->getRawDest())) {

1887 Builder.CreateAssumption(Builder.CreateIsNull(MI->getLength()));

1889 }

1890

1891

1892

1893

1896 if (GVSrc->isConstant()) {

1899 MMI->isAtomic()

1900 ? Intrinsic::memcpy_element_unordered_atomic

1901 : Intrinsic::memcpy;

1907 return II;

1908 }

1909 }

1910 }

1911

1912

1913

1915 auto VWidth = IIFVTy->getNumElements();

1916 APInt PoisonElts(VWidth, 0);

1919 if (V != II)

1921 return II;

1922 }

1923 }

1924

1925 if (II->isCommutative()) {

1926 if (auto Pair = matchSymmetricPair(II->getOperand(0), II->getOperand(1))) {

1929 return II;

1930 }

1931

1933 return NewCall;

1934 }

1935

1936

1937

1938

1939

1943 }

1944

1946 switch (IID) {

1947 case Intrinsic::objectsize: {

1950 &InsertedInstructions)) {

1951 for (Instruction *Inserted : InsertedInstructions)

1954 }

1955 return nullptr;

1956 }

1957 case Intrinsic::abs: {

1958 Value *IIOperand = II->getArgOperand(0);

    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

1960

1961

1967 }

1970

1972

1973 if (match(IIOperand,

1976 bool NSW =

1977 cast(IIOperand)->hasNoSignedWrap() && IntMinIsPoison;

1978 auto *XY = NSW ? Builder.CreateNSWMul(X, Y) : Builder.CreateMul(X, Y);

1980 }

1981

    if (std::optional<bool> Known =

1984

1985

1986 if (!*Known)

1988

1989

1990

1991 if (IntMinIsPoison)

1994 }

1995

1996

1997

1999 Value *NarrowAbs =

2000 Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());

2001 return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());

2002 }

2003

2004

2005

2008 return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));

2009

2010 break;

2011 }

2012 case Intrinsic::umin: {

2013 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

2014

2016 assert(II->getType()->getScalarSizeInBits() != 1 &&

2017 "Expected simplify of umin with max constant");

2019 Value *Cmp = Builder.CreateICmpNE(I0, Zero);

2021 }

2022

2023 if (Value *FoldedCttz =

2027

2028 if (Value *FoldedCtlz =

2032 [[fallthrough]];

2033 }

2034 case Intrinsic::umax: {

2035 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

2038 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {

2039 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);

2040 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());

2041 }

2046 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);

2047 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());

2048 }

2049 }

2050

2051

2052

2053

2059 return nullptr;

2060 if (C->isZero())

2061 return nullptr;

2063 return nullptr;

2064

2065 Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(X->getType(), 0));

2066 Value *NewSelect =

2067 Builder.CreateSelect(Cmp, ConstantInt::get(X->getType(), 1), A);

2069 };

2070

2071 if (IID == Intrinsic::umax) {

2072 if (Instruction *I = foldMaxMulShift(I0, I1))

2073 return I;

2074 if (Instruction *I = foldMaxMulShift(I1, I0))

2075 return I;

2076 }

2077

2078

2079

2080 [[fallthrough]];

2081 }

2082 case Intrinsic::smax:

2083 case Intrinsic::smin: {

2084 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

2087 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {

2088 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);

2089 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());

2090 }

2091

2096 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);

2097 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());

2098 }

2099 }

2100

2101

2102

2103 const APInt *MinC, *MaxC;

2104 auto CreateCanonicalClampForm = [&](bool IsSigned) {

2105 auto MaxIID = IsSigned ? Intrinsic::smax : Intrinsic::umax;

2106 auto MinIID = IsSigned ? Intrinsic::smin : Intrinsic::umin;

2107 Value *NewMax = Builder.CreateBinaryIntrinsic(

2108 MaxIID, X, ConstantInt::get(X->getType(), *MaxC));

2110 *II, Builder.CreateBinaryIntrinsic(

2111 MinIID, NewMax, ConstantInt::get(X->getType(), *MinC)));

2112 };

2113 if (IID == Intrinsic::smax &&

2117 return CreateCanonicalClampForm(true);

2118 if (IID == Intrinsic::umax &&

2122 return CreateCanonicalClampForm(false);

2123

2124

2125

2126 if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&

2127 II->getType()->isIntOrIntVectorTy(1)) {

2128 return BinaryOperator::CreateAnd(I0, I1);

2129 }

2130

2131

2132

2133 if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&

2134 II->getType()->isIntOrIntVectorTy(1)) {

2135 return BinaryOperator::CreateOr(I0, I1);

2136 }

2137

2138

2139

2140

2141

2142

2143 if (IID == Intrinsic::smin) {

2146 Value *Zero = ConstantInt::get(X->getType(), 0);

2148 CI,

2149 Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {X, Zero}));

2150 }

2151 }

2152

2153 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {

2154

2155

2156

2158 (I0->hasOneUse() || I1->hasOneUse())) {

2160 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);

2162 }

2163 }

2164

2165

2166

2167

2168

2169

2170

2171

2172

2173

2174

2175

2176

2180 bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;

2181 bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;

2182

2183 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {

2185 if (KnownSign == std::nullopt) {

2186 UseOr = false;

2187 UseAndN = false;

        } else if (*KnownSign) {

2189 UseOr ^= true;

2190 UseAndN ^= true;

2192

2193

2194

2197 }

2198 }

2199 if (UseOr)

2200 return BinaryOperator::CreateOr(I0, X);

2201 else if (UseAndN)

2202 return BinaryOperator::CreateAnd(I0, Builder.CreateNot(X));

2203 }

2204

2205

2206

2207

2208

2209

2210

2211

2218 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);

2220 }

2221 }

2222 return nullptr;

2223 };

2224

2225 if (Instruction *I = moveNotAfterMinMax(I0, I1))

2226 return I;

2227 if (Instruction *I = moveNotAfterMinMax(I1, I0))

2228 return I;

2229

2231 return I;

2232

2233

2234 const APInt *RHSC;

2237 return BinaryOperator::CreateAnd(Builder.CreateBinaryIntrinsic(IID, X, Y),

2238 ConstantInt::get(II->getType(), *RHSC));

2239

2240

2241

2242

2243

2245

2246

2247

2248 if (I0->hasOneUse() && !I1->hasOneUse())

2250

2251

2252

2253 bool IntMinIsPoison = isKnownNegation(I0, I1, true);

2255 Intrinsic::abs, I0,

2257

2258

2259

2260 if (IID == Intrinsic::smin || IID == Intrinsic::umax)

2261 Abs = Builder.CreateNeg(Abs, "nabs", IntMinIsPoison);

2263 }

2264

2266 return Sel;

2267

2269 return SAdd;

2270

2273

2275 return R;

2276

2278 return NewMinMax;

2279

2280

2286 I0, IsSigned, SQ.getWithInstruction(II));

2288 if (LHS_CR.icmp(Pred, *RHSC))

2292 ConstantInt::get(II->getType(), *RHSC));

2293 }

2294 }

2295

2298

2299 break;

2300 }

2301 case Intrinsic::scmp: {

2302 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

2303 Value *LHS, *RHS;

2306 CI,

2307 Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {LHS, RHS}));

2308 break;

2309 }

2310 case Intrinsic::bitreverse: {

2311 Value *IIOperand = II->getArgOperand(0);

2312

2315 X->getType()->isIntOrIntVectorTy(1)) {

2316 Type *Ty = II->getType();

2320 }

2321

2324 return crossLogicOpFold;

2325

2326 break;

2327 }

2328 case Intrinsic::bswap: {

2329 Value *IIOperand = II->getArgOperand(0);

2330

2331

2332

2333

2334

2339 Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);

2342 ? Instruction::LShr

2343 : Instruction::Shl;

2345 }

2346 }

2347

2352

2353

2354 if (BW - LZ - TZ == 8) {

2355 assert(LZ != TZ && "active byte cannot be in the middle");

2356 if (LZ > TZ)

2357 return BinaryOperator::CreateNUWShl(

2358 IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));

2359

2360 return BinaryOperator::CreateExactLShr(

2361 IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));

2362 }

2363

2364

2366 unsigned C = X->getType()->getScalarSizeInBits() - BW;

2367 Value *CV = ConstantInt::get(X->getType(), C);

2370 }

2371

2374 return crossLogicOpFold;

2375 }

2376

2377

2379 true))

2380 return BitOp;

2381 break;

2382 }
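
// Illustrative example of the single-active-byte bswap fold above (assumed
// known-bits facts): if only byte 1 of %x can be nonzero, i.e.
// (%x & ~0x0000ff00) == 0, then
//
//   %r = call i32 @llvm.bswap.i32(i32 %x)
//     =>
//   %r = shl nuw i32 %x, 8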

2383 case Intrinsic::masked_load:

2384 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))

2386 break;

2387 case Intrinsic::masked_store:

2388 return simplifyMaskedStore(*II);

2389 case Intrinsic::masked_gather:

2390 return simplifyMaskedGather(*II);

2391 case Intrinsic::masked_scatter:

2392 return simplifyMaskedScatter(*II);

2393 case Intrinsic::launder_invariant_group:

2394 case Intrinsic::strip_invariant_group:

2397 break;

2398 case Intrinsic::powi:

2400

2401

2402 if (Power->isMinusOne())

2404 II->getArgOperand(0), II);

2405

2406 if (Power->equalsInt(2))

2408 II->getArgOperand(0), II);

2409

2410 if (!Power->getValue()[0]) {

2412

2413

2414

2415

2418 match(II->getArgOperand(0),

2421 }

2422 }

2423 break;

2424

2425 case Intrinsic::cttz:

2426 case Intrinsic::ctlz:

2428 return I;

2429 break;

2430

2431 case Intrinsic::ctpop:

2433 return I;

2434 break;

2435

2436 case Intrinsic::fshl:

2437 case Intrinsic::fshr: {

2438 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);

2439 Type *Ty = II->getType();

2440 unsigned BitWidth = Ty->getScalarSizeInBits();

2443

2447 if (!ModuloC)

2448 return nullptr;

2449 if (ModuloC != ShAmtC)

2451

2453 ShAmtC, DL),

2455 "Shift amount expected to be modulo bitwidth");

2456

2457

2458

2459

2460 if (IID == Intrinsic::fshr) {

2461

2463 return nullptr;

2464

2470 }

2471 assert(IID == Intrinsic::fshl &&

2472 "All funnel shifts by simple constants should go left");

2473

2474

2475

2477 return BinaryOperator::CreateShl(Op0, ShAmtC);

2478

2479

2480

2482 return BinaryOperator::CreateLShr(Op1,

2484

2485

2491 }

2494 true))

2495 return BitOp;

2496

2497

2498

2500 const APInt *ShAmtInnerC, *ShAmtOuterC;

2502 m_APInt(ShAmtInnerC))) &&

2503 match(ShAmtC, m_APInt(ShAmtOuterC)) && Op0 == Op1) {

2504 APInt Sum = *ShAmtOuterC + *ShAmtInnerC;

2506 if (Modulo.isZero())

2508 Constant *ModuloC = ConstantInt::get(Ty, Modulo);

2510 {InnerOp, InnerOp, ModuloC});

2511 }

2512 }

2513

2514

2515

2516

2522 Mod, IID == Intrinsic::fshl ? Intrinsic::fshr : Intrinsic::fshl, Ty);

2524 }

2525

2526

2527

2530 Value *Op2 = II->getArgOperand(2);

2532 return BinaryOperator::CreateShl(Op0, And);

2533 }

2534

2535

2537 return &CI;

2538

2539

2540

2541

2543 break;

2547 return &CI;

2548 break;

2549 }

2550 case Intrinsic::ptrmask: {

2551 unsigned BitWidth = DL.getPointerTypeSizeInBits(II->getType());

2554 return II;

2555

2556 Value *InnerPtr, *InnerMask;

2558

2559

2560

2561 if (match(II->getArgOperand(0),

2563 m_Value(InnerMask))))) {

2564 assert(II->getArgOperand(1)->getType() == InnerMask->getType() &&

2565 "Mask types must match");

2566

2567

2568 Value *NewMask = Builder.CreateAnd(II->getArgOperand(1), InnerMask);

2572 }

2573

2574

2575 if (!CI.hasRetAttr(Attribute::NonNull) &&

2580 }

2581

2582 unsigned NewAlignmentLog =

2585

2586

2591 }

2593 return &CI;

2594 break;

2595 }

2596 case Intrinsic::uadd_with_overflow:

2597 case Intrinsic::sadd_with_overflow: {

2598 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))

2599 return I;

2600

2601

2602

2603

2605 const APInt *C0, *C1;

2606 Value *Arg0 = II->getArgOperand(0);

2607 Value *Arg1 = II->getArgOperand(1);

2608 bool IsSigned = IID == Intrinsic::sadd_with_overflow;

2609 bool HasNWAdd = IsSigned

2613 bool Overflow;

2615 IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);

2616 if (!Overflow)

2618 *II, Builder.CreateBinaryIntrinsic(

2619 IID, X, ConstantInt::get(Arg1->getType(), NewC)));

2620 }

2621 break;

2622 }

2623

2624 case Intrinsic::umul_with_overflow:

2625 case Intrinsic::smul_with_overflow:

2626 case Intrinsic::usub_with_overflow:

2627 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))

2628 return I;

2629 break;

2630

2631 case Intrinsic::ssub_with_overflow: {

2632 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))

2633 return I;

2634

2636 Value *Arg0 = II->getArgOperand(0);

2637 Value *Arg1 = II->getArgOperand(1);

2638

2639

2640

2641

2644

2645

2647 *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,

2648 Arg0, NegVal));

2649 }

2650

2651 break;

2652 }

2653

2654 case Intrinsic::uadd_sat:

2655 case Intrinsic::sadd_sat:

2656 case Intrinsic::usub_sat:

2657 case Intrinsic::ssub_sat: {

2659 Type *Ty = SI->getType();

2660 Value *Arg0 = SI->getLHS();

2661 Value *Arg1 = SI->getRHS();

2662

2663

2665 Arg0, Arg1, SI);

2666 switch (OR) {

2668 break;

2670 if (SI->isSigned())

2672 else

2675 unsigned BitWidth = Ty->getScalarSizeInBits();

2678 }

2680 unsigned BitWidth = Ty->getScalarSizeInBits();

2683 }

2684 }

2685

2686

2687

2688

2689

2692 if (IID == Intrinsic::usub_sat &&

2695 auto *NewC = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, C, C1);

2696 auto *NewSub =

2697 Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, NewC, A);

2699 }

2700

2701

2702 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&

2703 C->isNotMinSignedValue()) {

2706 *II, Builder.CreateBinaryIntrinsic(

2707 Intrinsic::sadd_sat, Arg0, NegVal));

2708 }

2709

2710

2711

2712

2715 const APInt *Val, *Val2;

2717 bool IsUnsigned =

2718 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;

2719 if (Other->getIntrinsicID() == IID &&

2723 if (IsUnsigned)

2724 NewVal = Val->uadd_sat(*Val2);

2726 bool Overflow;

2727 NewVal = Val->sadd_ov(*Val2, Overflow);

2728 if (Overflow) {

2729

2730

2731 break;

2732 }

2733 } else {

2734

2735 break;

2736 }

2737

2739 *II, Builder.CreateBinaryIntrinsic(

2740 IID, X, ConstantInt::get(II->getType(), NewVal)));

2741 }

2742 }

2743 break;

2744 }

2745

2746 case Intrinsic::minnum:

2747 case Intrinsic::maxnum:

2748 case Intrinsic::minimum:

2749 case Intrinsic::maximum: {

2750 Value *Arg0 = II->getArgOperand(0);

2751 Value *Arg1 = II->getArgOperand(1);

2755

2756

2757

2759 switch (IID) {

2760 case Intrinsic::maxnum:

2761 NewIID = Intrinsic::minnum;

2762 break;

2763 case Intrinsic::minnum:

2764 NewIID = Intrinsic::maxnum;

2765 break;

2766 case Intrinsic::maximum:

2767 NewIID = Intrinsic::minimum;

2768 break;

2769 case Intrinsic::minimum:

2770 NewIID = Intrinsic::maximum;

2771 break;

2772 default:

2774 }

2775 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);

2776 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);

2778 return FNeg;

2779 }

2780

2781

2784 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&

2790 switch (IID) {

2791 case Intrinsic::maxnum:

2792 Res = maxnum(*C1, *C2);

2793 break;

2794 case Intrinsic::minnum:

2795 Res = minnum(*C1, *C2);

2796 break;

2797 case Intrinsic::maximum:

2798 Res = maximum(*C1, *C2);

2799 break;

2800 case Intrinsic::minimum:

2801 Res = minimum(*C1, *C2);

2802 break;

2803 default:

2805 }

2806

2807

2808

2810 IID, X, ConstantFP::get(Arg0->getType(), Res),

2813 }

2814 }

2815

2816

2819 X->getType() == Y->getType()) {

2820 Value *NewCall =

2821 Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName());

2822 return new FPExtInst(NewCall, II->getType());

2823 }

2824

2825

2826

2827

2828

2829

2830

2831 auto IsMinMaxOrXNegX = [IID, &X](Value *Op0, Value *Op1) {

2833 return Op0->hasOneUse() ||

2834 (IID != Intrinsic::minimum && IID != Intrinsic::minnum);

2835 return false;

2836 };

2837

2838 if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {

2839 Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II);

2840 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)

2841 R = Builder.CreateFNegFMF(R, II);

2843 }

2844

2845 break;

2846 }

2847 case Intrinsic::matrix_multiply: {

2848

2849

2850

2856 return II;

2857 }

2858

2859 Value *Op0 = II->getOperand(0);

2860 Value *Op1 = II->getOperand(1);

2861 Value *OpNotNeg, *NegatedOp;

2862 unsigned NegatedOpArg, OtherOpArg;

2864 NegatedOp = Op0;

2865 NegatedOpArg = 0;

2866 OtherOpArg = 1;

2868 NegatedOp = Op1;

2869 NegatedOpArg = 1;

2870 OtherOpArg = 0;

2871 } else

2872

2873 break;

2874

2875

2877 break;

2878

2879 Value *OtherOp = II->getOperand(OtherOpArg);

2886

2889 Value *InverseOtherOp = Builder.CreateFNeg(OtherOp);

2892 return II;

2893 }

2894

2897 NewArgs[NegatedOpArg] = OpNotNeg;

2899 Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II);

2901 }

2902 break;

2903 }

2904 case Intrinsic::fmuladd: {

2905

2908 II->getFastMathFlags(), SQ.getWithInstruction(II)))

2910 II->getFastMathFlags());

2911

2912 [[fallthrough]];

2913 }

2914 case Intrinsic::fma: {

2915

2916 Value *Src0 = II->getArgOperand(0);

2917 Value *Src1 = II->getArgOperand(1);

2918 Value *Src2 = II->getArgOperand(2);

2923 return II;

2924 }

2925

2926

2931 return II;

2932 }

2933

2934

2935

2937 SQ.getWithInstruction(II)))

2939

2940

2941

2942

2944 (match(Src2, m_PosZeroFP()) && II->getFastMathFlags().noSignedZeros()))

2946

2947

2950

2951 break;

2952 }

2953 case Intrinsic::copysign: {

2954 Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);

2957 if (*KnownSignBit) {

2958

2959

2960 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);

2962 }

2963

2964

2965

2966 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);

2968 }

2969

2970

2971

2974 Value *CopySign =

2977 }

2978

2979

2980

2981

2984 APFloat PosMagC = *MagC;

2987 }

2988

2989

2990

2991

2994

2995 break;

2996 }

2997 case Intrinsic::fabs: {

2999 Value *Arg = II->getArgOperand(0);

3001

3003 CallInst *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II);

3005 }

3006

3008

3011 CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});

3012 CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});

3017 SI->setFastMathFlags(FMF1 | FMF2);

3018 return SI;

3019 }

3020

3023

3026 }

3027

3028 Value *Magnitude, *Sign;

3029 if (match(II->getArgOperand(0),

3031

3033 Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Magnitude, II);

3035 }

3036

3037 [[fallthrough]];

3038 }

3039 case Intrinsic::ceil:

  case Intrinsic::floor:

3041 case Intrinsic::round:

3042 case Intrinsic::roundeven:

3043 case Intrinsic::nearbyint:

3044 case Intrinsic::rint:

3045 case Intrinsic::trunc: {

3048

3049 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);

3050 return new FPExtInst(NarrowII, II->getType());

3051 }

3052 break;

3053 }

3054 case Intrinsic::cos:

3055 case Intrinsic::amdgcn_cos: {

3057 Value *Src = II->getArgOperand(0);

3060

3061

3062

3064 }

3065 break;

3066 }

3067 case Intrinsic::sin:

3068 case Intrinsic::amdgcn_sin: {

3071

3072 Value *NewSin = Builder.CreateUnaryIntrinsic(IID, X, II);

3074 }

3075 break;

3076 }

3077 case Intrinsic::ldexp: {

3078

3079

3080

3081

3082

3083

3084

3085

3086

3087

3088

3089

3090 Value *Src = II->getArgOperand(0);

3091 Value *Exp = II->getArgOperand(1);

3092

3095

3097 Src->getType()->getScalarType()->getFltSemantics();

3098

      if (!Scaled.isZero() && !Scaled.isInfinity()) {

3102

3103 Constant *FPConst = ConstantFP::get(Src->getType(), Scaled);

3105 }

3106 }

3107

3108 Value *InnerSrc;

3109 Value *InnerExp;

3112 Exp->getType() == InnerExp->getType()) {

3115

3118

3119

3120 Value *NewExp = Builder.CreateAdd(InnerExp, Exp);

3121 II->setArgOperand(1, NewExp);

3122 II->setFastMathFlags(InnerFlags);

3124 }

3125 }

3126

3127

3128

3133 Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 2.0),

3134 ConstantFP::get(II->getType(), 1.0));

3136 }

3140 Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 0.5),

3141 ConstantFP::get(II->getType(), 1.0));

3143 }

3144

3145

3146

3147

3148

3149 Value *SelectCond, *SelectLHS, *SelectRHS;

3150 if (match(II->getArgOperand(1),

3152 m_Value(SelectRHS))))) {

3153 Value *NewLdexp = nullptr;

3156 NewLdexp = Builder.CreateLdexp(Src, SelectLHS, II);

3157 Select = Builder.CreateSelect(SelectCond, NewLdexp, Src);

3159 NewLdexp = Builder.CreateLdexp(Src, SelectRHS, II);

3160 Select = Builder.CreateSelect(SelectCond, Src, NewLdexp);

3161 }

3162

3163 if (NewLdexp) {

3166 }

3167 }

3168

3169 break;

3170 }
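
// Illustrative example of the ldexp select fold above (assumed IR): a
// bool-extended exponent becomes a multiply by a selected power of two:
//
//   %e = zext i1 %b to i32
//   %r = call float @llvm.ldexp.f32.i32(float %x, i32 %e)
//     =>
//   %m = select i1 %b, float 2.0, float 1.0
//   %r = fmul float %x, %m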

3171 case Intrinsic::ptrauth_auth:

3172 case Intrinsic::ptrauth_resign: {

3173

3174

3175 if (II->hasOperandBundles())

3176 break;

3177

3178

3179

3180 bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;

3181 Value *Ptr = II->getArgOperand(0);

3183 Value *Disc = II->getArgOperand(2);

3184

3185

3186

3187 Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;

3189

3190

3192 break;

3193

3195 if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) {

3197 break;

3198 } else if (CI->getIntrinsicID() == Intrinsic::ptrauth_resign) {

3200 break;

3203 } else

3204 break;

3206

3207

3209 if (!CPA || !CPA->isKnownCompatibleWith(Key, Disc, DL))

3210 break;

3211

3212

3218 SignDisc, Null,

3219 Null);

3223 }

3224

3225

3226 BasePtr = Builder.CreatePtrToInt(CPA->getPointer(), II->getType());

3227 } else

3228 break;

3229

3230 unsigned NewIntrin;

3231 if (AuthKey && NeedSign) {

3232

3233 NewIntrin = Intrinsic::ptrauth_resign;

3234 } else if (AuthKey) {

3235

3236 NewIntrin = Intrinsic::ptrauth_auth;

3237 } else if (NeedSign) {

3238

3239 NewIntrin = Intrinsic::ptrauth_sign;

3240 } else {

3241

3244 }

3245

3248 if (AuthKey) {

3251 }

3252

3253 if (NeedSign) {

3254 CallArgs.push_back(II->getArgOperand(3));

3255 CallArgs.push_back(II->getArgOperand(4));

3256 }

3257

3261 }

3262 case Intrinsic::arm_neon_vtbl1:

3263 case Intrinsic::arm_neon_vtbl2:

3264 case Intrinsic::arm_neon_vtbl3:

3265 case Intrinsic::arm_neon_vtbl4:

3266 case Intrinsic::aarch64_neon_tbl1:

3267 case Intrinsic::aarch64_neon_tbl2:

3268 case Intrinsic::aarch64_neon_tbl3:

3269 case Intrinsic::aarch64_neon_tbl4:

3271 case Intrinsic::arm_neon_vtbx1:

3272 case Intrinsic::arm_neon_vtbx2:

3273 case Intrinsic::arm_neon_vtbx3:

3274 case Intrinsic::arm_neon_vtbx4:

3275 case Intrinsic::aarch64_neon_tbx1:

3276 case Intrinsic::aarch64_neon_tbx2:

3277 case Intrinsic::aarch64_neon_tbx3:

3278 case Intrinsic::aarch64_neon_tbx4:

3280

3281 case Intrinsic::arm_neon_vmulls:

3282 case Intrinsic::arm_neon_vmullu:

3283 case Intrinsic::aarch64_neon_smull:

3284 case Intrinsic::aarch64_neon_umull: {

3285 Value *Arg0 = II->getArgOperand(0);

3286 Value *Arg1 = II->getArgOperand(1);

3287

3288

3291 }

3292

3293

3294 bool Zext = (IID == Intrinsic::arm_neon_vmullu ||

3295 IID == Intrinsic::aarch64_neon_umull);

3299 Value *V0 = Builder.CreateIntCast(CV0, NewVT, !Zext);

3300 Value *V1 = Builder.CreateIntCast(CV1, NewVT, !Zext);

3302 }

3303

3304

3306 }

3307

3308

3312 if (Splat->isOne())

3314 !Zext);

3315

3316 break;

3317 }

3318 case Intrinsic::arm_neon_aesd:

3319 case Intrinsic::arm_neon_aese:

3320 case Intrinsic::aarch64_crypto_aesd:

3321 case Intrinsic::aarch64_crypto_aese:

3322 case Intrinsic::aarch64_sve_aesd:

3323 case Intrinsic::aarch64_sve_aese: {

3324 Value *DataArg = II->getArgOperand(0);

3325 Value *KeyArg = II->getArgOperand(1);

3326

3327

3330

3331

3337 return II;

3338 }

3339 break;

3340 }

3341 case Intrinsic::hexagon_V6_vandvrt:

3342 case Intrinsic::hexagon_V6_vandvrt_128B: {

3343

3346 if (ID0 != Intrinsic::hexagon_V6_vandqrt &&

3347 ID0 != Intrinsic::hexagon_V6_vandqrt_128B)

3348 break;

3349 Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);

3352

3354 if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))

3356 }

3357 break;

3358 }

3359 case Intrinsic::stackrestore: {

    enum class ClassifyResult {
      None,
      Alloca,
      StackRestore,
      CallWithSideEffects,
    };

3368 return ClassifyResult::Alloca;

3369

3372 if (II->getIntrinsicID() == Intrinsic::stackrestore)

3373 return ClassifyResult::StackRestore;

3374

3375 if (II->mayHaveSideEffects())

3376 return ClassifyResult::CallWithSideEffects;

3377 } else {

3378

3379 return ClassifyResult::CallWithSideEffects;

3380 }

3381 }

3382

3383 return ClassifyResult::None;

3384 };

3385

3386

3387

3388

3390 if (SS->getIntrinsicID() == Intrinsic::stacksave &&

3391 SS->getParent() == II->getParent()) {

3393 bool CannotRemove = false;

3394 for (++BI; &*BI != II; ++BI) {

3395 switch (Classify(&*BI)) {

3396 case ClassifyResult::None:

3397

3398 break;

3399

3400 case ClassifyResult::StackRestore:

3401

3402

3404 CannotRemove = true;

3405 break;

3406

3407 case ClassifyResult::Alloca:

3408 case ClassifyResult::CallWithSideEffects:

3409

3410

3411 CannotRemove = true;

3412 break;

3413 }

3414 if (CannotRemove)

3415 break;

3416 }

3417

3418 if (!CannotRemove)

3420 }

3421 }

3422

3423

3424

3426 Instruction *TI = II->getParent()->getTerminator();

3427 bool CannotRemove = false;

3428 for (++BI; &*BI != TI; ++BI) {

3429 switch (Classify(&*BI)) {

3430 case ClassifyResult::None:

3431

3432 break;

3433

3434 case ClassifyResult::StackRestore:

3435

3437

3438 case ClassifyResult::Alloca:

3439 case ClassifyResult::CallWithSideEffects:

3440

3441

3442

3443 CannotRemove = true;

3444 break;

3445 }

3446 if (CannotRemove)

3447 break;

3448 }

3449

3450

3451

3452

3455 break;

3456 }

3457 case Intrinsic::lifetime_end:

3458

3459

3460 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||

3461 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||

3462 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))

3463 break;

3464

3466 return I.getIntrinsicID() == Intrinsic::lifetime_start;

3467 }))

3468 return nullptr;

3469 break;

3470 case Intrinsic::assume: {

3471 Value *IIOperand = II->getArgOperand(0);

3473 II->getOperandBundlesAsDefs(OpBundles);

3474

3475

3476

3477

3483 return nullptr;

3484 };

3485

3486

3487

3490 return RemoveConditionFromAssume(Next);

3491

3492

3493

3494

3495 FunctionType *AssumeIntrinsicTy = II->getFunctionType();

3496 Value *AssumeIntrinsic = II->getCalledOperand();

3499 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,

3500 II->getName());

3501 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());

3503 }

3504

3506 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,

3507 Builder.CreateNot(A), OpBundles, II->getName());

3508 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,

3509 Builder.CreateNot(B), II->getName());

3511 }

3512

3513

3514

3518 LHS->getOpcode() == Instruction::Load &&

3519 LHS->getType()->isPointerTy() &&

3522 LHS->setMetadata(LLVMContext::MD_nonnull, MD);

3523 LHS->setMetadata(LLVMContext::MD_noundef, MD);

3524 return RemoveConditionFromAssume(II);

3525

3526

3527

3528 }

3529

3530 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {

3532

3533

3534

3535

3536

3537 if (OBU.getTagName() == "separate_storage") {

3539 auto MaybeSimplifyHint = [&](const Use &U) {

3540 Value *Hint = U.get();

3541

3542

3546 };

3547 MaybeSimplifyHint(OBU.Inputs[0]);

3548 MaybeSimplifyHint(OBU.Inputs[1]);

3549 }

3550

3551

3552 if (OBU.getTagName() == "align" && OBU.Inputs.size() == 2) {

3555 if (!RK || RK.AttrKind != Attribute::Alignment ||

3557 continue;

3558

3559

3562

3563

3564

3565

3568 continue;

3569

3570

3571

3572

3576 if ((1ULL << TZ) < RK.ArgValue)

3577 continue;

3579 }

3580 }

3581

3582

3583

3584

3585

3586

3588 match(IIOperand,

3590 A->getType()->isPointerTy()) {

3593

3594 Replacement->insertBefore(Next->getIterator());

3595 AC.registerAssumption(Replacement);

3596 return RemoveConditionFromAssume(II);

3597 }

3598 }

3599

3600

3601

3602

3603

3604

3605

3606

3610 match(IIOperand,

3618

3619

3620

3621

3624 if (auto *Replacement =

3626

3627 Replacement->insertAfter(II->getIterator());

3628 AC.registerAssumption(Replacement);

3629 }

3630 return RemoveConditionFromAssume(II);

3631 }

3632 }

3633 }

3634

3635

3637 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {

3638 auto &BOI = II->bundle_op_info_begin()[Idx];

3641 if (BOI.End - BOI.Begin > 2)

3642 continue;

3643

3644

3649 if (CanonRK == RK)

3650 continue;

3651 if (!CanonRK) {

3652 if (BOI.End - BOI.Begin > 0) {

3653 Worklist.pushValue(II->op_begin()[BOI.Begin]);

3655 }

3656 continue;

3657 }

3659 if (BOI.End - BOI.Begin > 0)

3660 II->op_begin()[BOI.Begin].set(CanonRK.WasOn);

3661 if (BOI.End - BOI.Begin > 1)

3662 II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(

3666 return II;

3667 }

3668 }

3669

3670

3671

3676

3677

3681 }

3682

3683

3684

3686 break;

3687 }

3688 case Intrinsic::experimental_guard: {

3689

3690

3691

3694

3696 break;

3698 }

3699 Value *NextCond = nullptr;

3700 if (match(NextInst,

3702 Value *CurrCond = II->getArgOperand(0);

3703

3704

3705

3706 if (CurrCond != NextCond) {

3708 while (MoveI != NextInst) {

3709 auto *Temp = MoveI;

3712 }

3714 }

3716 return II;

3717 }

3718 break;

3719 }

3720 case Intrinsic::vector_insert: {

3721 Value *Vec = II->getArgOperand(0);

3722 Value *SubVec = II->getArgOperand(1);

3723 Value *Idx = II->getArgOperand(2);

3727

3728

3729

3730 if (DstTy && VecTy && SubVecTy) {

3731 unsigned DstNumElts = DstTy->getNumElements();

3732 unsigned VecNumElts = VecTy->getNumElements();

3733 unsigned SubVecNumElts = SubVecTy->getNumElements();

3735

3736

3737 if (VecNumElts == SubVecNumElts)

3739

3740

3741

3742

3743

3745 unsigned i;

3746 for (i = 0; i != SubVecNumElts; ++i)

3748 for (; i != VecNumElts; ++i)

3750

3751 Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);

3752

3754 for (unsigned i = 0; i != IdxN; ++i)

3755 Mask.push_back(i);

3756 for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)

3757 Mask.push_back(i);

3758 for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)

3759 Mask.push_back(i);

3760

3761 Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);

3763 }

3764 break;

3765 }

3766 case Intrinsic::vector_extract: {

3767 Value *Vec = II->getArgOperand(0);

3768 Value *Idx = II->getArgOperand(1);

3769

3770 Type *ReturnType = II->getType();

3771

3772

3774 Value *InsertTuple, *InsertIdx, *InsertValue;

3778 InsertValue->getType() == ReturnType) {

3780

3781

3782

3783 if (ExtractIdx == Index)

3785

3786

3787

3788

3789

3790 else

3792 }

3793

3796

3797 if (DstTy && VecTy) {

3798 auto DstEltCnt = DstTy->getElementCount();

3799 auto VecEltCnt = VecTy->getElementCount();

3801

3802

3803 if (DstEltCnt == VecTy->getElementCount()) {

3806 }

3807

3808

3809

3810 if (VecEltCnt.isScalable() || DstEltCnt.isScalable())

3811 break;

3812

3814 for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)

3815 Mask.push_back(IdxN + i);

3816

3817 Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);

3819 }

3820 break;

3821 }

3822 case Intrinsic::experimental_vp_reverse: {

3824 Value *Vec = II->getArgOperand(0);

3825 Value *Mask = II->getArgOperand(1);

3827 break;

3828 Value *EVL = II->getArgOperand(2);

3829

3830

3836 OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(),

3837 II->getIterator());

3839 }

3840 break;

3841 }

3842 case Intrinsic::vector_reduce_or:

3843 case Intrinsic::vector_reduce_and: {

3844

3845

3846

3847

3848

3849

3850

3851 Value *Arg = II->getArgOperand(0);

3853

3854 if (Value *NewOp =

3857 return II;

3858 }

3859

3862 if (FTy->getElementType() == Builder.getInt1Ty()) {

3864 Vect, Builder.getIntNTy(FTy->getNumElements()));

3865 if (IID == Intrinsic::vector_reduce_and) {

3866 Res = Builder.CreateICmpEQ(

3868 } else {

3869 assert(IID == Intrinsic::vector_reduce_or &&

3870 "Expected or reduction.");

3871 Res = Builder.CreateIsNotNull(Res);

3872 }

3873 if (Arg != Vect)
3874 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
3875 II->getType());
3876 return replaceInstUsesWith(CI, Res);
3877 }

3878 }

3879 [[fallthrough]];

3880 }
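The i1 special case relies on the fact that a <N x i1> vector bitcast to iN packs one lane per bit, so the and-reduction becomes "all bits set" and the or-reduction becomes "any bit set". A standalone model, with 8 lanes packed into a uint8_t (an illustration, not the LLVM API):

#include <cassert>
#include <cstdint>

// reduce.and == icmp eq (bitcast), all-ones; reduce.or == icmp ne 0.
static bool reduceAnd(uint8_t Lanes) { return Lanes == 0xFF; }
static bool reduceOr(uint8_t Lanes) { return Lanes != 0; }

int main() {
  assert(reduceAnd(0xFF) && !reduceAnd(0xFE));
  assert(reduceOr(0x01) && !reduceOr(0x00));
}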

3881 case Intrinsic::vector_reduce_add: {

3882 if (IID == Intrinsic::vector_reduce_add) {

3883 // Convert vector_reduce_add(ZExt(<n x i1>)) to
3884 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
3885 // Convert vector_reduce_add(SExt(<n x i1>)) to
3886 // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
3887 // Convert vector_reduce_add(<n x i1>) to
3888 // Trunc(ctpop(bitcast <n x i1> to in)).

3889 Value *Arg = II->getArgOperand(0);

3890 Value *Vect;
3891
3892 if (Value *NewOp =
3893 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
3894 replaceUse(II->getOperandUse(0), NewOp);
3895 return II;

3896 }

3897
3898 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3899 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
3900 if (FTy->getElementType() == Builder.getInt1Ty()) {
3901 Value *V = Builder.CreateBitCast(
3902 Vect, Builder.getIntNTy(FTy->getNumElements()));

3903 Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V);

3904 if (Res->getType() != II->getType())

3905 Res = Builder.CreateZExtOrTrunc(Res, II->getType());

3906 if (Arg != Vect &&
3907 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)
3908 Res = Builder.CreateNeg(Res);
3909 return replaceInstUsesWith(CI, Res);
3910 }

3911 }

3912
3913 // Fold reduce_add over a splat into a multiply by the lane count.
3914 if (Value *Splat = getSplatValue(Arg)) {
3915 ElementCount VecToReduceCount =
3916 cast<VectorType>(Arg->getType())->getElementCount();
3917 if (VecToReduceCount.isFixed()) {

3918 unsigned VectorSize = VecToReduceCount.getFixedValue();

3919 return BinaryOperator::CreateMul(
3920 Splat,

3921 ConstantInt::get(Splat->getType(), VectorSize, false,

3922 true));

3923 }

3924 }

3925 }

3926 [[fallthrough]];

3927 }
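For reduce.add the same bitcast turns the lane sum into a population count; a sign-extended i1 source contributes 0/-1 per lane, hence the negation above. Standalone model (uses C++20 <bit>; one byte of lanes; not the LLVM API):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint8_t Lanes = 0b00101101;              // 4 lanes set
  int ZExtSum = std::popcount(Lanes);      // sum of zext(i1) lanes
  int SExtSum = -std::popcount(Lanes);     // sum of sext(i1) lanes (0 / -1)
  assert(ZExtSum == 4 && SExtSum == -4);
}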

3928 case Intrinsic::vector_reduce_xor: {

3929 if (IID == Intrinsic::vector_reduce_xor) {

3930 // Exclusive disjunction reduction over the vector with
3931 // (potentially-extended) i1 element type is actually a
3932 // (potentially-extended) arithmetic `add` reduction over the original
3933 // non-extended value:
3934 // vector_reduce_xor(?ext(<n x i1>))
3935 // -->
3936 // ?ext(vector_reduce_add(<n x i1>))

3937 Value *Arg = II->getArgOperand(0);

3938 Value *Vect;
3939
3940 if (Value *NewOp =
3941 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
3942 replaceUse(II->getOperandUse(0), NewOp);
3943 return II;

3944 }

3945
3946 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3947 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
3948 if (VTy->getElementType() == Builder.getInt1Ty()) {
3949 Value *Res = Builder.CreateAddReduce(Vect);
3950 if (Arg != Vect)
3951 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
3952 II->getType());
3953 return replaceInstUsesWith(CI, Res);
3954 }

3955 }

3956 }

3957 [[fallthrough]];

3958 }
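Xor over i1 lanes is addition mod 2, i.e. the parity of the set lanes, which is why the fold above emits an add-reduction over the original <n x i1> value. Standalone model (not the LLVM API):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint8_t Lanes = 0b0111;                            // 3 lanes set
  bool XorReduce = (std::popcount(Lanes) & 1) != 0;  // parity of set lanes
  assert(XorReduce);
}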

3959 case Intrinsic::vector_reduce_mul: {

3960 if (IID == Intrinsic::vector_reduce_mul) {

3961 // Multiplicative reduction over the vector with (potentially-extended)
3962 // i1 element type is actually a (potentially zero-extended)
3963 // logical `and` reduction over the original non-extended value:
3964 // vector_reduce_mul(?ext(<n x i1>))
3965 // -->
3966 // zext(vector_reduce_and(<n x i1>))

3967 Value *Arg = II->getArgOperand(0);

3968 Value *Vect;
3969
3970 if (Value *NewOp =
3971 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
3972 replaceUse(II->getOperandUse(0), NewOp);
3973 return II;

3974 }

3975
3976 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3977 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
3978 if (VTy->getElementType() == Builder.getInt1Ty()) {
3979 Value *Res = Builder.CreateAndReduce(Vect);
3980 if (Res->getType() != II->getType())
3981 Res = Builder.CreateZExt(Res, II->getType());
3982 return replaceInstUsesWith(CI, Res);
3983 }

3984 }

3985 }

3986 [[fallthrough]];

3987 }

3988 case Intrinsic::vector_reduce_umin:

3989 case Intrinsic::vector_reduce_umax: {

3990 if (IID == Intrinsic::vector_reduce_umin ||

3991 IID == Intrinsic::vector_reduce_umax) {

3992 // UMin/UMax reduction over the vector with (potentially-extended)
3993 // i1 element type is actually a (potentially-extended) logical
3994 // `and`/`or` reduction over the original non-extended value:
3995 // vector_reduce_u{min,max}(?ext(<n x i1>))
3996 // -->
3997 // ?ext(vector_reduce_{and,or}(<n x i1>))

3998 Value *Arg = II->getArgOperand(0);

3999 Value *Vect;
4000
4001 if (Value *NewOp =
4002 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
4003 replaceUse(II->getOperandUse(0), NewOp);
4004 return II;

4005 }

4006
4007 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
4008 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
4009 if (VTy->getElementType() == Builder.getInt1Ty()) {

4010 Value *Res = IID == Intrinsic::vector_reduce_umin

4011 ? Builder.CreateAndReduce(Vect)

4012 : Builder.CreateOrReduce(Vect);

4013 if (Arg != Vect)
4014 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
4015 II->getType());
4016 return replaceInstUsesWith(CI, Res);
4017 }

4018 }

4019 }

4020 [[fallthrough]];

4021 }

4022 case Intrinsic::vector_reduce_smin:

4023 case Intrinsic::vector_reduce_smax: {

4024 if (IID == Intrinsic::vector_reduce_smin ||

4025 IID == Intrinsic::vector_reduce_smax) {

4026 // SMin/SMax reduction over the vector with (potentially-extended)
4027 // i1 element type is actually a (potentially-extended) logical
4028 // `and`/`or` reduction over the original non-extended value:
4029 // vector_reduce_s{min,max}(<n x i1>)
4030 // -->
4031 // vector_reduce_{or,and}(<n x i1>)
4032 // and
4033 // vector_reduce_s{min,max}(sext(<n x i1>))
4034 // -->
4035 // sext(vector_reduce_{or,and}(<n x i1>))
4036 // and
4037 // vector_reduce_s{min,max}(zext(<n x i1>))
4038 // -->
4039 // zext(vector_reduce_{and,or}(<n x i1>))

4040 Value *Arg = II->getArgOperand(0);

4041 Value *Vect;
4042
4043 if (Value *NewOp =
4044 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
4045 replaceUse(II->getOperandUse(0), NewOp);
4046 return II;

4047 }

4048
4049 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
4050 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
4051 if (VTy->getElementType() == Builder.getInt1Ty()) {
4052 Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd;
4053 if (Arg != Vect)
4054 ExtOpc = cast<CastInst>(Arg)->getOpcode();
4055 Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
4056 (ExtOpc == Instruction::CastOps::ZExt))
4057 ? Builder.CreateAndReduce(Vect)
4058 : Builder.CreateOrReduce(Vect);
4059 if (Arg != Vect)
4060 Res = Builder.CreateCast(ExtOpc, Res, II->getType());
4061 return replaceInstUsesWith(CI, Res);
4062 }

4063 }

4064 }

4065 [[fallthrough]];

4066 }
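The signed min/max twist comes from the lane encoding: after zext an i1 lane is 0/1 (so smin is the and-reduction), but after sext it is 0/-1 and the order flips (smin becomes the or-reduction). Standalone model (not the LLVM API):

#include <algorithm>
#include <cassert>

int main() {
  bool A = true, B = false;

  // zext encoding: 0 / 1 -> smin is AND, smax is OR.
  int ZA = A ? 1 : 0, ZB = B ? 1 : 0;
  assert(std::min(ZA, ZB) == (A && B));
  assert(std::max(ZA, ZB) == (A || B));

  // sext encoding: 0 / -1 -> the order flips: smin is OR, smax is AND.
  int SA = A ? -1 : 0, SB = B ? -1 : 0;
  assert((std::min(SA, SB) != 0) == (A || B));
  assert((std::max(SA, SB) != 0) == (A && B));
}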

4067 case Intrinsic::vector_reduce_fmax:

4068 case Intrinsic::vector_reduce_fmin:

4069 case Intrinsic::vector_reduce_fadd:

4070 case Intrinsic::vector_reduce_fmul: {

4071 bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&

4072 IID != Intrinsic::vector_reduce_fmul) ||

4073 II->hasAllowReassoc();

4074 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||

4075 IID == Intrinsic::vector_reduce_fmul)

4076 ? 1

4077 : 0;

4078 Value *Arg = II->getArgOperand(ArgIdx);
4079 if (Value *NewOp = simplifyReductionOperand(Arg, CanReorderLanes)) {
4080 replaceUse(II->getOperandUse(ArgIdx), NewOp);

4081 return nullptr;

4082 }

4083 break;

4084 }

4085 case Intrinsic::is_fpclass: {

4086 if (Instruction *I = foldIntrinsicIsFPClass(*II))
4087 return I;

4088 break;

4089 }

4090 case Intrinsic::threadlocal_address: {

4091 Align MinAlign = getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
4092 MaybeAlign Align = II->getRetAlign();
4093 if (MinAlign > Align.valueOrOne()) {
4094 II->addRetAttr(Attribute::getWithAlignment(II->getContext(), MinAlign));
4095 return II;

4096 }

4097 break;

4098 }

4099 case Intrinsic::frexp: {

4101

4102

4103

4106 X = Builder.CreateInsertValue(

4108 1);

4110 }

4111 }

4112 break;

4113 }

4114 case Intrinsic::get_active_lane_mask: {

4115 const APInt *Op0, *Op1;

4116 if (match(II->getOperand(0), m_APInt(Op0)) &&
4117 match(II->getOperand(1), m_APInt(Op1)) && !Op0->isZero()) {
4118 Type *OpTy = II->getOperand(0)->getType();
4119 return replaceInstUsesWith(
4120 *II, Builder.CreateIntrinsic(
4121 II->getType(), Intrinsic::get_active_lane_mask,

4122 {Constant::getNullValue(OpTy),

4123 ConstantInt::get(OpTy, Op1->usub_sat(*Op0))}));

4124 }

4125 break;

4126 }
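The canonicalization rewrites a constant-base lane mask to a zero base, using the identity "Base + i < N iff i < usub_sat(N, Base)" for unsigned saturating subtraction. A standalone check with small example constants (not the LLVM API):

#include <cassert>
#include <cstdint>

static bool laneActive(uint64_t Base, uint64_t N, uint64_t I) {
  return Base + I < N;  // per-lane semantics of get_active_lane_mask
}
static uint64_t usubSat(uint64_t A, uint64_t B) { return A > B ? A - B : 0; }

int main() {
  const uint64_t Base = 3, N = 7;
  for (uint64_t I = 0; I < 16; ++I)
    assert(laneActive(Base, N, I) == (I < usubSat(N, Base)));
}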

4127 case Intrinsic::experimental_get_vector_length: {

4128

4130 std::max(II->getArgOperand(0)->getType()->getScalarSizeInBits(),

4131 II->getType()->getScalarSizeInBits());

4134 SQ.getWithInstruction(II))

4137 ->getValue()

4140 MaxLanes = MaxLanes.multiply(

4142

4145 *II, Builder.CreateZExtOrTrunc(II->getArgOperand(0), II->getType()));

4146 return nullptr;

4147 }

4148 default: {

4149

4150 std::optional<Instruction *> V = targetInstCombineIntrinsic(*II);
4151 if (V)

4152 return *V;

4153 break;

4154 }

4155 }

4156

4157

4158

4159

4160

4161

4162

4166 bool IsVectorCond = Sel->getCondition()->getType()->isVectorTy();

4168 continue;

4169

4170

4171 bool SimplifyBothArms =

4172 Op->getType()->isVectorTy() && II->getType()->isVectorTy();

4174 *II, Sel, false, SimplifyBothArms))

4175 return R;

4176 }

4179 return R;

4180 }

4181 }

4182

4184 return Shuf;

4185

4188

4191

4192

4193

4194 return visitCallBase(*II);

4195}

4196

4197

4198 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
4199 // Remove a fence that is immediately followed or preceded by another
4200 // fence that is at least as strong.
4201 auto *NFI = dyn_cast<FenceInst>(FI.getNextNode());
4204
4205 // Returns true if FI1 is an identical or stronger fence than FI2.

4206 auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {

4207 auto FI1SyncScope = FI1->getSyncScopeID();
4208 // Consider same scope, where scope is global or single-thread.
4209 if (FI1SyncScope != FI2->getSyncScopeID() ||
4210 (FI1SyncScope != SyncScope::System &&
4211 FI1SyncScope != SyncScope::SingleThread))
4212 return false;
4213
4214 return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
4215 };

4216 if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
4217 return eraseInstFromFunction(FI);
4218
4219 if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNode()))
4220 if (isIdenticalOrStrongerFence(PFI, &FI))
4221 return eraseInstFromFunction(FI);
4222 return nullptr;

4223}
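A neighbouring fence is redundant only when it is in the same sync scope and at least as strong. A toy ordering lattice illustrating the "at least as strong" test (an illustration only, not LLVM's isAtLeastOrStrongerThan):

#include <cassert>

enum Ord { Monotonic, Acquire, Release, AcqRel, SeqCst };

// Toy lattice: SeqCst > AcqRel > {Acquire, Release} > Monotonic, where
// Acquire and Release are incomparable with each other.
static bool atLeastAsStrong(Ord A, Ord B) {
  if (A == SeqCst) return true;
  if (A == AcqRel) return B != SeqCst;
  return A == B || B == Monotonic;
}

int main() {
  assert(atLeastAsStrong(SeqCst, AcqRel));    // stronger fence subsumes
  assert(!atLeastAsStrong(Acquire, Release)); // incomparable orderings
  assert(atLeastAsStrong(Release, Monotonic));
}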

4224

4225

4226 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
4227 return visitCallBase(II);

4228}

4229

4230

4231 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
4232 return visitCallBase(CBI);

4233}

4234

4235 static Value *optimizeModularFormat(CallInst *CI, IRBuilderBase &B) {
4236 if (!CI->hasFnAttr("modular-format"))

4237 return nullptr;

4238

4241

4242 unsigned FirstArgIdx;

4243 [[maybe_unused]] bool Error;

4244 Error = Args[2].getAsInteger(10, FirstArgIdx);

4245 assert(!Error && "invalid first arg index");

4246 --FirstArgIdx;

4250

4251 if (AllAspects.empty())

4252 return nullptr;

4253

4254 SmallVector<StringRef> NeededAspects;
4255 for (StringRef Aspect : AllAspects) {

4256 if (Aspect == "float") {

4260 [](Value *V) { return V->getType()->isFloatingPointTy(); }))

4261 NeededAspects.push_back("float");

4262 } else {

4263

4264 NeededAspects.push_back(Aspect);

4265 }

4266 }

4267

4268 if (NeededAspects.size() == AllAspects.size())

4269 return nullptr;

4270

4275 FnName, Callee->getFunctionType(),

4276 Callee->getAttributes().removeFnAttribute(Ctx, "modular-format"));

4278 New->setCalledFunction(ModularFn);

4279 New->removeFnAttr("modular-format");

4280 B.Insert(New);

4281

4282 const auto ReferenceAspect = [&](StringRef Aspect) {

4284 Name += '_';

4285 Name += Aspect;

4288 B.CreateCall(RelocNoneFn,

4290 };

4291

4293 for (StringRef Request : NeededAspects)

4294 ReferenceAspect(Request);

4295

4296 return New;

4297}
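The pass keeps an aspect only when the call site can actually need it: the "float" aspect is dropped when no variadic argument is floating-point, and any aspect it does not understand is conservatively kept. A standalone sketch of that filtering (hypothetical aspect names; not the LLVM API):

#include <cassert>
#include <string>
#include <vector>

static std::vector<std::string>
neededAspects(const std::vector<std::string> &AllAspects, bool HasFloatArg) {
  std::vector<std::string> Needed;
  for (const std::string &Aspect : AllAspects) {
    if (Aspect == "float") {
      if (HasFloatArg)
        Needed.push_back(Aspect);   // only needed for FP arguments
    } else {
      Needed.push_back(Aspect);     // unknown aspect: assume it is needed
    }
  }
  return Needed;
}

int main() {
  auto N = neededAspects({"float"}, /*HasFloatArg=*/false);
  assert(N.empty());                // "float" dropped at this call site
}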

4298

4301

4302

4303

4304

4306 return nullptr;

4307

4308 auto InstCombineRAUW = [this](Instruction *From, Value *With) {

4310 };

4311 auto InstCombineErase = [this](Instruction *I) {

4313 };

4315 InstCombineRAUW, InstCombineErase);

4316 if (Value *With = Simplifier.optimizeCall(CI, Builder)) {

4317 ++NumSimplified;

4318 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4319 }
4320 if (Value *With = optimizeModularFormat(CI, Builder)) {
4321 ++NumSimplified;
4322 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4323 }

4324

4325 return nullptr;

4326}

4327

4329

4330

4331 Value *Underlying = TrampMem->stripPointerCasts();
4332 if (Underlying != TrampMem &&

4333 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))

4334 return nullptr;

4335 if (!isa<AllocaInst>(Underlying))
4336 return nullptr;

4337
4338 IntrinsicInst *InitTrampoline = nullptr;
4339 for (User *U : TrampMem->users()) {
4340 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4341 if (!II)
4342 return nullptr;

4343 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {

4344 if (InitTrampoline)

4345

4346 return nullptr;

4347 InitTrampoline = II;

4348 continue;

4349 }

4350 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)

4351

4352 continue;

4353 return nullptr;

4354 }

4355

4356

4357 if (!InitTrampoline)

4358 return nullptr;

4359

4360

4361 if (InitTrampoline->getOperand(0) != TrampMem)

4362 return nullptr;

4363

4364 return InitTrampoline;

4365}

4366

4368 Value *TrampMem) {

4369

4370

4371 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4372 E = AdjustTramp->getParent()->begin();

4373 I != E;) {

4374 Instruction *Inst = &*--I;
4375 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
4376 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&

4377 II->getOperand(0) == TrampMem)

4378 return II;

4379 if (Inst->mayWriteToMemory())
4380 return nullptr;

4381 }

4382 return nullptr;

4383}

4384

4385

4386

4387

4389 Callee = Callee->stripPointerCasts();

4390 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4391 if (!AdjustTramp ||

4392 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)

4393 return nullptr;

4394

4395 Value *TrampMem = AdjustTramp->getOperand(0);
4396
4397 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
4398 return IT;
4399 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
4400 return IT;

4401 return nullptr;

4402}

4403

4404 Value *Callee = Call.getCalledOperand();
4405 auto *IPC = dyn_cast<IntToPtrInst>(Callee);
4406
4407 if (!IPC || !IPC->isNoopCast(DL))

4408 return nullptr;

4409

4410 auto *II = dyn_cast<IntrinsicInst>(IPC->getOperand(0));
4411 if (!II)
4412 return nullptr;

4413

4414 Intrinsic::ID IIID = II->getIntrinsicID();
4415 if (IIID != Intrinsic::ptrauth_resign && IIID != Intrinsic::ptrauth_sign)

4416 return nullptr;

4417

4418

4419 std::optional<OperandBundleUse> PtrAuthBundleOrNone;

4424 PtrAuthBundleOrNone = Bundle;

4425 else

4427 }

4428

4429 if (!PtrAuthBundleOrNone)

4430 return nullptr;

4431

4432 Value *NewCallee = nullptr;

4433 switch (IIID) {

4434

4435

4436 case Intrinsic::ptrauth_resign: {

4437

4438 if (II->getOperand(3) != PtrAuthBundleOrNone->Inputs[0])

4439 return nullptr;

4440

4441 if (II->getOperand(4) != PtrAuthBundleOrNone->Inputs[1])

4442 return nullptr;

4443

4444

4445

4446 if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])

4447 return nullptr;

4448

4449 Value *NewBundleOps[] = {II->getOperand(1), II->getOperand(2)};

4450 NewBundles.emplace_back("ptrauth", NewBundleOps);

4451 NewCallee = II->getOperand(0);

4452 break;

4453 }

4454

4455

4456

4457

4458 case Intrinsic::ptrauth_sign: {

4459

4460 if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])

4461 return nullptr;

4462

4463 if (II->getOperand(2) != PtrAuthBundleOrNone->Inputs[1])

4464 return nullptr;

4465 NewCallee = II->getOperand(0);

4466 break;

4467 }

4468 default:
4469 llvm_unreachable("unexpected intrinsic ID");

4470 }

4471

4472 if (!NewCallee)

4473 return nullptr;

4474

4475 NewCallee = Builder.CreateBitOrPointerCast(NewCallee, Callee->getType());

4476 CallBase *NewCall = CallBase::Create(&Call, NewBundles);
4477 NewCall->setCalledOperand(NewCallee);
4478 return NewCall;

4479}

4480

4483 if (!CPA)

4484 return nullptr;

4485

4487

4488 if (!CalleeF)

4489 return nullptr;

4490

4491

4493 if (!PAB)

4494 return nullptr;

4495

4498

4499

4500 if (!CPA->isKnownCompatibleWith(Key, Discriminator, DL))

4501 return nullptr;

4502

4503

4506 return NewCall;

4507}

4508

4509bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,

4511

4512

4513

4514

4516

4519

4522

4523

4528 } else {

4532 }

4533 }

4534

4535

4537 if (!Alignment)

4539

4542 uint64_t AlignmentVal = AlignOpC->getZExtValue();

4545 Align NewAlign = Align(AlignmentVal);

4546 if (NewAlign > ExistingAlign) {

4550 }

4551 }

4552 }

4554}

4555

4556

4559

4560

4561

4562

4563 SmallVector<unsigned, 4> ArgNos;

4564 unsigned ArgNo = 0;

4565

4566 for (Value *V : Call.args()) {
4567 if (V->getType()->isPointerTy()) {

4568

4569

4572 (HasDereferenceable &&

4574 V->getType()->getPointerAddressSpace()))) {

4575 if (Value *Res = simplifyNonNullOperand(V, HasDereferenceable)) {

4578 }

4582 }

4583 }

4584 ArgNo++;

4585 }

4586

4587 assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");

4588

4589 if (!ArgNos.empty()) {
4590 AttributeList AS = Call.getAttributes();
4591 LLVMContext &Ctx = Call.getContext();
4592 AS = AS.addParamAttribute(Ctx, ArgNos,
4593 Attribute::get(Ctx, Attribute::NonNull));
4594 Call.setAttributes(AS);
4595 Changed = true;
4596 }

4597

4598

4599

4603 transformConstExprCastCall(Call))

4604 return nullptr;

4605

4606 if (CalleeF) {
4607 // Remove the convergent attr on calls when the callee is not convergent.
4608 if (Call.isConvergent() && !CalleeF->isConvergent() &&
4609 !CalleeF->isIntrinsic()) {
4610 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
4611 << "\n");
4612 Call.setNotConvergent();
4613 return &Call;

4614 }

4615

4616

4617

4618

4624

4625

4626

4630

4631

4636

4637

4638

4642 return nullptr;

4643 }

4644 }

4645

4646

4647

4651

4652

4655

4657

4658 return nullptr;

4659 }

4660

4661

4664 }

4665

4667 return transformCallThroughTrampoline(Call, *II);

4668

4669

4670 if (Instruction *NewCall = foldPtrAuthIntrinsicCallee(Call))

4671 return NewCall;

4672

4673

4674 if (Instruction *NewCall = foldPtrAuthConstantCallee(Call))

4675 return NewCall;

4676

4679 if (IA->canThrow()) {

4680

4681

4684 }

4685 }

4686

4687

4688

4689

4692

4693

4695 }

4696

4700 Type *RetArgTy = ReturnedArg->getType();

4703 Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));

4704 }

4705

4706

4707

4711 }

4712

4713

4714

4718 if (CalleeF) {

4719 ConstantInt *FunctionType = nullptr;

4721

4722 if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type))

4724

4725 if (FunctionType &&

4729 << ": call to " << CalleeF->getName()

4730 << " using a mismatching function pointer type\n";

4731 }

4732 });

4733

4735 }

4736

4739

4740

4742 case Intrinsic::experimental_gc_statepoint: {

4743 GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
4744 SmallPtrSet<Value *, 32> LiveGcValues;

4745 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {

4746 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);

4747

4748

4751 continue;

4752 }

4753

4756

4757

4761 continue;

4762 }

4763

4765

4766

4767

4769

4772 continue;

4773 }

4774

4775

4776 if (!GCR.hasRetAttr(Attribute::NonNull) &&

4780

4781 Worklist.pushUsersToWorkList(GCR);

4782 }

4783 }

4784

4785

4786

4791 }

4792

4793

4794

4795

4796

4797 LiveGcValues.insert(BasePtr);

4798 LiveGcValues.insert(DerivedPtr);

4799 }

4800 std::optional<OperandBundleUse> Bundle =
4801 GCSP.getOperandBundle(LLVMContext::OB_gc_live);

4802 unsigned NumOfGCLives = LiveGcValues.size();

4803 if (!Bundle || NumOfGCLives == Bundle->Inputs.size())

4804 break;

4805

4806 DenseMap<Value *, unsigned> Val2Idx;

4807 std::vector<Value *> NewLiveGc;

4808 for (Value *V : Bundle->Inputs) {

4809 auto [It, Inserted] = Val2Idx.try_emplace(V);
4810 if (!Inserted)

4811 continue;

4812 if (LiveGcValues.count(V)) {

4813 It->second = NewLiveGc.size();

4814 NewLiveGc.push_back(V);

4815 } else

4816 It->second = NumOfGCLives;

4817 }

4818

4819 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {

4820 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);

4821 Value *BasePtr = GCR.getBasePtr();
4822 assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
4823 "Missed live gc for base pointer");
4824 auto *OpIntTy1 = GCR.getOperand(1)->getType();
4825 GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
4826 Value *DerivedPtr = GCR.getDerivedPtr();
4827 assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
4828 "Missed live gc for derived pointer");
4829 auto *OpIntTy2 = GCR.getOperand(2)->getType();
4830 GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));

4831 }

4832

4835 }

4836 default: { break; }

4837 }

4838

4840}
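The statepoint fold shrinks the "gc-live" bundle to the values that are still relocated and then remaps every relocate's base/derived indices into the shorter list. A standalone model of the index remapping, with strings standing in for IR values (not the LLVM API):

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
  std::vector<std::string> OldLive = {"a", "b", "c", "d"};
  std::vector<bool> StillRelocated = {true, false, true, false};

  std::vector<std::string> NewLive;
  std::unordered_map<std::string, unsigned> Val2Idx;
  for (size_t I = 0; I != OldLive.size(); ++I)
    if (StillRelocated[I]) {
      Val2Idx[OldLive[I]] = NewLive.size();  // new, denser index
      NewLive.push_back(OldLive[I]);
    }

  assert(NewLive.size() == 2);
  assert(Val2Idx["a"] == 0 && Val2Idx["c"] == 1);
}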

4841

4842

4843

4844

4845bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {

4846 auto *Callee =
4847 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
4848 if (!Callee)

4849 return false;

4850

4852 "CallBr's don't have a single point after a def to insert at");

4853

4854

4855

4856

4857 if (Callee->isDeclaration())

4858 return false;

4859

4860

4861

4862

4863 if (Callee->hasFnAttribute("thunk"))

4864 return false;

4865

4866

4867

4868

4869 if (Callee->hasFnAttribute(Attribute::Naked))

4870 return false;

4871

4872

4873

4874

4875

4877 return false;

4878

4879 Instruction *Caller = &Call;
4880 const AttributeList &CallerPAL = Call.getAttributes();
4881

4882

4883

4884

4885 FunctionType *FT = Callee->getFunctionType();

4886 Type *OldRetTy = Caller->getType();
4887 Type *NewRetTy = FT->getReturnType();

4888

4889

4890 if (OldRetTy != NewRetTy) {

4891

4892 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL))
4893 return false;

4894

4896 if (Caller->use_empty())

4897 return false;

4898 }

4899

4900 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {

4901 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());

4902 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(

4903 NewRetTy, CallerPAL.getRetAttrs())))

4904 return false;

4905 }

4906

4907

4908

4909

4910

4911 if (!Caller->use_empty()) {

4912 BasicBlock *PhisNotSupportedBlock = nullptr;

4913 if (auto *II = dyn_cast<InvokeInst>(Caller))
4914 PhisNotSupportedBlock = II->getNormalDest();

4915 if (PhisNotSupportedBlock)

4916 for (User *U : Caller->users())

4917 if (PHINode *PN = dyn_cast<PHINode>(U))
4918 if (PN->getParent() == PhisNotSupportedBlock)

4919 return false;

4920 }

4921 }

4922

4923 unsigned NumActualArgs = Call.arg_size();
4924 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

4925

4926

4927

4928

4929

4930

4931

4932

4933

4934 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||

4935 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))

4936 return false;

4937

4938 auto AI = Call.arg_begin();
4939 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {

4940 Type *ParamTy = FT->getParamType(i);

4941 Type *ActTy = (*AI)->getType();

4942

4943 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
4944 return false;

4945

4946

4947 if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))

4948 .overlaps(AttributeFuncs::typeIncompatible(

4949 ParamTy, CallerPAL.getParamAttrs(i),

4950 AttributeFuncs::ASK_UNSAFE_TO_DROP)))

4951 return false;

4952

4953 if (Call.isInAllocaArgument(i) ||
4954 CallerPAL.hasParamAttr(i, Attribute::Preallocated))

4955 return false;

4956

4957 if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))

4958 return false;

4959

4960 if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=

4961 Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))

4962 return false;

4963 }

4964

4965 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&

4966 !CallerPAL.isEmpty()) {

4967

4968

4969

4970 unsigned SRetIdx;

4971 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&

4972 SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())

4973 return false;

4974 }

4975

4976

4977

4978 SmallVector<Value *, 8> Args;

4979 SmallVector<AttributeSet, 8> ArgAttrs;
4980 Args.reserve(NumActualArgs);

4981 ArgAttrs.reserve(NumActualArgs);

4982

4983

4984 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());

4985

4986

4987

4988 RAttrs.remove(

4989 AttributeFuncs::typeIncompatible(NewRetTy, CallerPAL.getRetAttrs()));

4990

4991 LLVMContext &Ctx = Call.getContext();
4992 AI = Call.arg_begin();
4993 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {

4994 Type *ParamTy = FT->getParamType(i);

4995

4996 Value *NewArg = *AI;

4997 if ((*AI)->getType() != ParamTy)

4998 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);

4999 Args.push_back(NewArg);

5000

5001

5002

5003 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(

5004 ParamTy, CallerPAL.getParamAttrs(i), AttributeFuncs::ASK_SAFE_TO_DROP);

5005 ArgAttrs.push_back(
5006 CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));

5007 }

5008

5009

5010

5011 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {

5012 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
5013 ArgAttrs.push_back(AttributeSet());

5014 }

5015

5016

5017 if (FT->getNumParams() < NumActualArgs) {

5018

5019 if (FT->isVarArg()) {

5020

5021 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {

5022 Type *PTy = getPromotedType((*AI)->getType());
5023 Value *NewArg = *AI;

5024 if (PTy != (*AI)->getType()) {

5025 // Must promote to pass through va_arg area!
5026 Instruction::CastOps opcode =
5027 CastInst::getCastOpcode(*AI, false, PTy, false);
5028 NewArg = Builder.CreateCast(opcode, *AI, PTy);

5029 }

5030 Args.push_back(NewArg);

5031

5032

5033 ArgAttrs.push_back(CallerPAL.getParamAttrs(i));

5034 }

5035 }

5036 }

5037

5038 AttributeSet FnAttrs = CallerPAL.getFnAttrs();

5039

5041 Caller->setName("");

5042

5043 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&

5044 "missing argument attributes");

5045 AttributeList NewCallerPAL = AttributeList::get(

5046 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
5047

5048 SmallVector<OperandBundleDef, 1> OpBundles;
5049 Call.getOperandBundlesAsDefs(OpBundles);
5050

5051 CallBase *NewCall;

5053 NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),

5054 II->getUnwindDest(), Args, OpBundles);

5055 } else {

5056 NewCall = Builder.CreateCall(Callee, Args, OpBundles);

5057 cast<CallInst>(NewCall)->setTailCallKind(
5058 cast<CallInst>(Caller)->getTailCallKind());
5059 }

5060 NewCall->takeName(Caller);
5061 NewCall->setCallingConv(Call.getCallingConv());
5062 NewCall->setAttributes(NewCallerPAL);
5063

5064

5065 NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

5066

5067
5068 Instruction *NC = NewCall;
5069 Value *NV = NC;
5070 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
5071 assert(!NV->getType()->isVoidTy());
5072 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
5073 NC->setDebugLoc(Caller->getDebugLoc());
5074
5075 auto OptInsertPt = NewCall->getInsertionPointAfterDef();
5076 assert(OptInsertPt && "No place to insert cast");
5077 InsertNewInstBefore(NC, *OptInsertPt);
5078 Worklist.pushUsersToWorkList(*Caller);

5079 }

5080

5081 if (!Caller->use_empty())
5082 replaceInstUsesWith(*Caller, NV);
5083 else if (Caller->hasValueHandle()) {
5084 if (OldRetTy == NV->getType())
5085 ValueHandleBase::ValueIsRAUWd(Caller, NV);
5086 else
5087 // We cannot call ValueIsRAUWd with a different type, and the
5088 // actual tracked value will disappear.
5089 ValueHandleBase::ValueIsDeleted(Caller);
5090 }

5091
5092 eraseInstFromFunction(*Caller);
5093 return true;

5094}
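The transform fires only when every value crossing the boundary (return value and arguments) survives a bit- or pointer-level no-op cast. A toy compatibility check in the same spirit, assuming 64-bit pointers (an illustration, not CastInst::isBitOrNoopPointerCastable):

#include <cassert>
#include <cstddef>
#include <vector>

enum Ty { I32, I64, Ptr };

// Same bit width -> losslessly castable in this toy model (ptr == 64 bits).
static bool bitOrPointerCastable(Ty From, Ty To) {
  auto Bits = [](Ty T) { return T == I32 ? 32 : 64; };
  return Bits(From) == Bits(To);
}

static bool canTransform(Ty OldRet, Ty NewRet, const std::vector<Ty> &Args,
                         const std::vector<Ty> &Params) {
  if (!bitOrPointerCastable(NewRet, OldRet))
    return false;
  for (size_t I = 0; I < Args.size() && I < Params.size(); ++I)
    if (!bitOrPointerCastable(Args[I], Params[I]))
      return false;
  return true;
}

int main() {
  assert(canTransform(Ptr, I64, {I32}, {I32})); // i64 ret used as ptr: ok
  assert(!canTransform(I32, Ptr, {}, {}));      // 32 vs 64 bits: reject
}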

5095

5096

5097

5099 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
5100 IntrinsicInst &Tramp) {
5101 FunctionType *FTy = Call.getFunctionType();
5102 AttributeList Attrs = Call.getAttributes();
5103
5104 // If the call already has the 'nest' attribute somewhere then give up -
5105 // otherwise 'nest' would occur twice after splicing in the chain.

5106 if (Attrs.hasAttrSomewhere(Attribute::Nest))

5107 return nullptr;

5108

5109 Function *NestF =
5110 cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
5111

5112 AttributeList NestAttrs = NestF->getAttributes();

5113 if (!NestAttrs.isEmpty()) {

5114 unsigned NestArgNo = 0;

5115 Type *NestTy = nullptr;

5116 AttributeSet NestAttr;

5117 FunctionType *NestFTy = NestF->getFunctionType();
5118 // Look for a parameter marked with the 'nest' attribute.
5119 for (FunctionType::param_iterator I = NestFTy->param_begin(),
5120 E = NestFTy->param_end();

5121 I != E; ++NestArgNo, ++I) {

5122 AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);

5123 if (AS.hasAttribute(Attribute::Nest)) {
5124 // Record the parameter type and any other attributes.

5125 NestTy = *I;

5126 NestAttr = AS;

5127 break;

5128 }

5129 }

5130

5131 if (NestTy) {

5132 std::vector<Value*> NewArgs;

5133 std::vector<AttributeSet> NewArgAttrs;
5134 NewArgs.reserve(Call.arg_size() + 1);
5135 NewArgAttrs.reserve(Call.arg_size());
5136
5137 // Insert the nest argument into the call argument list, which may
5138 // mean appending it. Likewise for attributes.
5139

5140 {

5141 unsigned ArgNo = 0;

5142 auto I = Call.arg_begin(), E = Call.arg_end();
5143 do {

5144 if (ArgNo == NestArgNo) {

5145 // Add the chain argument and attributes.
5146 Value *NestVal = Tramp.getArgOperand(2);
5147 if (NestVal->getType() != NestTy)

5148 NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");

5149 NewArgs.push_back(NestVal);

5150 NewArgAttrs.push_back(NestAttr);

5151 }

5152

5153 if (I == E)

5154 break;

5155

5156

5157 NewArgs.push_back(*I);

5158 NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

5159

5160 ++ArgNo;

5161 ++I;

5162 } while (true);

5163 }

5164

5165

5166

5167

5168

5169 std::vector<Type*> NewTypes;

5170 NewTypes.reserve(FTy->getNumParams()+1);

5171

5172

5173

5174 {

5175 unsigned ArgNo = 0;

5176 FunctionType::param_iterator I = FTy->param_begin(),
5177 E = FTy->param_end();

5178

5179 do {

5180 if (ArgNo == NestArgNo)

5181

5182 NewTypes.push_back(NestTy);

5183

5184 if (I == E)

5185 break;

5186

5187

5188 NewTypes.push_back(*I);

5189

5190 ++ArgNo;

5191 ++I;

5192 } while (true);

5193 }

5194

5195

5196

5197 FunctionType *NewFTy =

5198 FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg());

5199 AttributeList NewPAL =

5200 AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),

5201 Attrs.getRetAttrs(), NewArgAttrs);

5202
5203 SmallVector<OperandBundleDef, 1> OpBundles;
5204 Call.getOperandBundlesAsDefs(OpBundles);
5205
5206 Instruction *NewCaller;
5207 if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
5208 NewCaller = InvokeInst::Create(NewFTy, NestF, II->getNormalDest(),
5209 II->getUnwindDest(), NewArgs, OpBundles);
5210 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
5211 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
5212 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
5213 NewCaller =
5214 CallBrInst::Create(NewFTy, NestF, CBI->getDefaultDest(),
5215 CBI->getIndirectDests(), NewArgs, OpBundles);
5216 cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
5217 cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
5218 } else {
5219 NewCaller = CallInst::Create(NewFTy, NestF, NewArgs, OpBundles);
5220 cast<CallInst>(NewCaller)->setTailCallKind(
5221 cast<CallInst>(&Call)->getTailCallKind());
5222 cast<CallInst>(NewCaller)->setCallingConv(Call.getCallingConv());
5223 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
5225 }

5226 NewCaller->setDebugLoc(Call.getDebugLoc());
5227

5228 return NewCaller;

5229 }

5230 }

5231

5232 // Replace the trampoline call with a direct call. Since there is no 'nest'
5233 // parameter, there is no need to adjust the argument list. Let the generic
5234 // code sort out any function type mismatches.
5235 Call.setCalledFunction(FTy, NestF);

5236 return &Call;

5237}
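At its core the trampoline rewrite splices the chain ('nest') value into the argument list at the position of the callee's nest parameter, appending it if the parameter comes last. Standalone model, with strings standing in for IR values (not the LLVM API):

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Args = {"x", "y"};
  const size_t NestArgNo = 0;  // hypothetical: nest parameter is first

  Args.insert(Args.begin() + NestArgNo, "chain");

  assert(Args.size() == 3);
  assert(Args[0] == "chain" && Args[1] == "x" && Args[2] == "y");
}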


Static helpers defined in this file (from the generated cross-reference; line numbers refer to InstCombineCalls.cpp; truncated descriptions are left as in the source index):

    static cl::opt<unsigned> GuardWideningWindow("instcombine-guard-widening-window", cl::init(3), cl::desc("How wide an instruction window to bypass looking for another guard"))
    :97   static Type *getPromotedType(Type *Ty) - return the specified type promoted as it would be to pass through a va_arg area.
    :108  static bool hasUndefSource(AnyMemTransferInst *MI) - recognize a memcpy/memmove from a trivially otherwise unused alloca.
    :446  static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II, InstCombinerImpl &IC) - transforms launder.invariant.group and strip.invariant.group like: launder(launder(x)) ...
    :476  static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC)
    :652  static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC)
    :742  static Instruction *simplifyNeonTbl(IntrinsicInst &II, InstCombiner &IC, bool IsExtension) - convert tbl/tbx intrinsics to shufflevector if the mask is constant, and at most two source operands ...
    :857  static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E, unsigned NumOperands)
    :877  static bool removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC, std::function<bool(const IntrinsicInst &)> IsStart)
    :915  static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call)
    :928  static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result, Constant *Overflow) - creates a result tuple for an overflow intrinsic II with a given Result and a constant Overflow value ...
    :981  static bool inputDenormalIsDAZ(const Function &F, const Type *Ty)
    :989  static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask, const Function &F, Type *Ty)
    :1187 static std::optional<bool> getKnownSign(Value *Op, const SimplifyQuery &SQ)
    :1201 static std::optional<bool> getKnownSignOrZero(Value *Op, const SimplifyQuery &SQ)
    :1214 static bool signBitMustBeTheSame(Value *Op0, Value *Op1, const SimplifyQuery &SQ) - return true if two values Op0 and Op1 are known to have the same sign.
    :1227 static Instruction *moveAddAfterMinMax(IntrinsicInst *II, InstCombiner::BuilderTy &Builder) - try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0.
    :1326 static Instruction *foldClampRangeOfTwo(IntrinsicInst *II, InstCombiner::BuilderTy &Builder) - if we have a clamp pattern like max (min X, 42), 41, where the output can only be one of two possibl...
    :1366 static Value *reassociateMinMaxWithConstants(IntrinsicInst *II, IRBuilderBase &Builder, const SimplifyQuery &SQ) - if this min/max has a constant operand and an operand that is a matching min/max with a constant oper...
    :1400 static Instruction *reassociateMinMaxWithConstantInOperand(IntrinsicInst *II, InstCombiner::BuilderTy &Builder) - if this min/max has a matching min/max operand with a constant, try to push the constant operand into...
    :1428 static Instruction *factorizeMinMaxTree(IntrinsicInst *II) - reduce a sequence of min/max intrinsics with a common operand.
    :1621 static bool isIdempotentBinaryIntrinsic(Intrinsic::ID IID) - helper to match idempotent binary intrinsics, namely, intrinsics where f(f(x, y), y) == f(x,...
    :1644 static Value *foldIdempotentBinaryIntrinsicRecurrence(InstCombinerImpl &IC, IntrinsicInst *II) - attempt to simplify value-accumulating recurrences of kind: umax.acc = phi i8 [ umax,...
    :1664 static Value *simplifyReductionOperand(Value *Arg, bool CanReorderLanes)
    :1697 static Value *foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1, const DataLayout &DL, InstCombiner::BuilderTy &Builder) - fold an unsigned minimum of trailing or leading zero bits counts: umin(cttz(CtOp, ZeroUndef),...
    :1730 static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp) - return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
    :1752 static Value *foldIntrinsicUsingDistributiveLaws(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
    :4235 static Value *optimizeModularFormat(CallInst *CI, IRBuilderBase &B)
    :4328 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem)
    :4367 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, Value *TrampMem)
    :4388 static IntrinsicInst *findInitTrampoline(Value *Callee)


InstCombinerImpl members defined in this file:

    :118  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI)
    :220  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI)
    :904  Instruction *visitVAEndInst(VAEndInst &I)
    :1488 Instruction *foldShuffledIntrinsicOperands(IntrinsicInst *II) - if all arguments of the intrinsic are unary shuffles with the same mask, try to shuffle after the int...
    :1545 Value *foldReversedIntrinsicOperands(IntrinsicInst *II) - if all arguments of the intrinsic are reverses, try to pull the reverse after the intrinsic.
    :1813 Instruction *visitCallInst(CallInst &CI) - CallInst simplification.
    :4198 Instruction *visitFenceInst(FenceInst &FI)
    :4226 Instruction *visitInvokeInst(InvokeInst &II)
    :4231 Instruction *visitCallBrInst(CallBrInst &CBI)

The core instruction combiner logic.

unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const

IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy

An IRBuilder that automatically inserts new instructions into the worklist.

bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)

Return true if the specified value is free to invert (apply ~ to).

DominatorTree & getDominatorTree() const

Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)

Inserts an instruction New before instruction Old.

Instruction * replaceInstUsesWith(Instruction &I, Value *V)

A combiner-aware RAUW-like routine.

void replaceUse(Use &U, Value *NewValue)

Replace use and add the previously used value to the worklist.

InstructionWorklist & Worklist

A worklist of the instructions that need to be simplified.

void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const

std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)

Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)

Replace operand of instruction and add old operand to the worklist.

bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const

AssumptionCache & getAssumptionCache() const

OptimizationRemarkEmitter & ORE

Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)

const SimplifyQuery & getSimplifyQuery() const

bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)

LLVM_ABI Instruction * clone() const

Create a copy of 'this' instruction that is identical in all ways except the following:

LLVM_ABI void setHasNoUnsignedWrap(bool b=true)

Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.

LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY

Return true if this instruction may modify memory.

LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)

Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...

LLVM_ABI void setHasNoSignedWrap(bool b=true)

Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

LLVM_ABI const Module * getModule() const

Return the module owning the function this instruction belongs to or nullptr it the function does not...

LLVM_ABI void setAAMetadata(const AAMDNodes &N)

Sets the AA metadata on this instruction from the AAMDNodes structure.

LLVM_ABI void moveBefore(InstListType::iterator InsertPos)

Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...

LLVM_ABI const Function * getFunction() const

Return the function this instruction belongs to.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

bool isTerminator() const

LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)

Set the metadata of the specified kind to the specified node.

LLVM_ABI std::optional< InstListType::iterator > getInsertionPointAfterDef()

Get the first insertion point at which the result of this instruction is defined.

LLVM_ABI bool isIdenticalTo(const Instruction *I) const LLVM_READONLY

Return true if the specified instruction is exactly identical to the current one.

void setDebugLoc(DebugLoc Loc)

Set the debug location information for this instruction.

LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())

Copy metadata from SrcInst to this instruction.

Class to represent integer types.

static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)

This static method is the primary way of constructing an IntegerType.

A wrapper class for inspecting calls to intrinsic functions.

Intrinsic::ID getIntrinsicID() const

Return the intrinsic ID of this intrinsic.

static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)

This is an important class for using LLVM in a threaded context.

An instruction for reading from memory.

static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)

static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)

static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)

Returns the comparison predicate underlying the intrinsic.

ICmpInst::Predicate getPredicate() const

Returns the comparison predicate underlying the intrinsic.

bool isSigned() const

Whether the intrinsic is signed or unsigned.

A Module instance is used to store all the information related to an LLVM module.

StringRef getName() const

Get a short "name" for the module.

unsigned getOpcode() const

Return the opcode for this Instruction or ConstantExpr.

Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.

bool hasNoSignedWrap() const

Test whether this operation is known to never undergo signed overflow, aka the nsw property.

bool hasNoUnsignedWrap() const

Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.

bool isCommutative() const

Return true if the instruction is commutative.

static LLVM_ABI PoisonValue * get(Type *T)

Static factory methods - Return an 'poison' object of the specified type.

Represents a saturating add/sub intrinsic.

This class represents the LLVM 'select' instruction.

static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)

This instruction constructs a fixed permutation of two input vectors.

This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...

bool test(unsigned Idx) const

bool all() const

Returns true if all bits are set.

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...

reference emplace_back(ArgTypes &&... Args)

void reserve(size_type N)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
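
A minimal sketch of the reserve/push_back pattern the members above support (names are illustrative only):

#include "llvm/ADT/SmallVector.h"

// Collect 0..N-1 without heap allocation whenever N <= 8.
static llvm::SmallVector<unsigned, 8> firstN(unsigned N) {
  llvm::SmallVector<unsigned, 8> Out;
  Out.reserve(N); // at most one growth step
  for (unsigned I = 0; I != N; ++I)
    Out.push_back(I);
  return Out;
}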

An instruction for storing to memory.

void setVolatile(bool V)

Specify whether this is a volatile store or not.

void setAlignment(Align Align)

void setOrdering(AtomicOrdering Ordering)

Sets the ordering constraint of this store instruction.
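
A minimal sketch combining the three setters above; the helper name is hypothetical, and the caller is assumed to have verified that the stored type and alignment are legal for an unordered atomic access:

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"

// Retag an existing store in place.
static void retagStore(llvm::StoreInst *S, bool IsVolatile) {
  S->setAlignment(llvm::Align(8));
  S->setVolatile(IsVolatile);
  S->setOrdering(llvm::AtomicOrdering::Unordered);
}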

StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.

Class to represent struct types.

static LLVM_ABI bool isCallingConvCCompatible(CallBase *CI)

Returns true if call site / callee has cdecl-compatible calling conventions.

Provides information about what library functions are available for the current target.

This class represents a truncation of integer types.

The instances of the Type class are immutable: once they are created, they are never changed.

static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)

LLVM_ABI unsigned getIntegerBitWidth() const

static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)

bool isPointerTy() const

True if this is an instance of PointerType.

LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const

Return true if this type could be converted with a lossless BitCast to type 'Ty'.

Type * getScalarType() const

If this is a vector type, return the element type, otherwise return 'this'.

bool isStructTy() const

True if this is an instance of StructType.

LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const

Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.

LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY

If this is a vector type, return the getPrimitiveSizeInBits value for the element type.

bool isIntegerTy() const

True if this is an instance of IntegerType.

bool isVoidTy() const

Return true if this is 'void'.

static UnaryOperator * CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)

static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)

static LLVM_ABI UndefValue * get(Type *T)

Static factory methods - Return an 'undef' object of the specified type.

A Use represents the edge between a Value definition and its users.

LLVM_ABI unsigned getOperandNo() const

Return the operand # of this use in its User.

void setOperand(unsigned i, Value *Val)

Value * getOperand(unsigned i) const
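
A minimal sketch using the operand accessors above; only sound when the caller knows the operation is commutative (compare isCommutative() earlier in this list):

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"

// Swap the first two operands of a binary instruction in place.
static void swapBinaryOperands(llvm::Instruction *I) {
  llvm::Value *Op0 = I->getOperand(0);
  I->setOperand(0, I->getOperand(1));
  I->setOperand(1, Op0);
}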

This represents the llvm.va_end intrinsic.

static LLVM_ABI void ValueIsDeleted(Value *V)

static LLVM_ABI void ValueIsRAUWd(Value *Old, Value *New)

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

static constexpr uint64_t MaximumAlignment

bool hasOneUse() const

Return true if there is exactly one use of this value.

iterator_range< user_iterator > users()

static LLVM_ABI void dropDroppableUse(Use &U)

Remove the droppable use U.

LLVM_ABI const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

LLVM_ABI LLVMContext & getContext() const

All values hold a context through their type.

static constexpr unsigned MaxAlignmentExponent

The maximum alignment for instructions.

LLVM_ABI StringRef getName() const

Return a constant reference to the value's name.

LLVM_ABI void takeName(Value *V)

Transfer the name from V to this value.

Base class of all SIMD vector types.

ElementCount getElementCount() const

Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.

static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)

This static method is the primary way to construct a VectorType.
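
A minimal sketch pairing this factory with ElementCount; the function name is hypothetical:

#include "llvm/IR/DerivedTypes.h"

// Build <4 x i32>; ElementCount::getScalable(4) would instead give the
// scalable type <vscale x 4 x i32> with the same element type.
static llvm::VectorType *makeV4I32(llvm::LLVMContext &Ctx) {
  return llvm::VectorType::get(llvm::Type::getInt32Ty(Ctx),
                               llvm::ElementCount::getFixed(4));
}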

constexpr ScalarTy getFixedValue() const

static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)

constexpr bool isFixed() const

Returns true if the quantity is not scaled by vscale.

static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)

const ParentTy * getParent() const

self_iterator getIterator()

NodeTy * getNextNode()

Get the next node, or nullptr for the list tail.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Align[]

Key for Kernel::Arg::Metadata::mAlign.

constexpr char Args[]

Key for Kernel::Metadata::mArgs.

constexpr char Attrs[]

Key for Kernel::Metadata::mAttrs.

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

unsigned ID

LLVM IR allows arbitrary numbers to be used as calling convention identifiers.

@ C

The default llvm calling convention, compatible with C.

@ BasicBlock

Various leaf nodes.

LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})

Look up the Function declaration of the intrinsic id in the Module M.

SpecificConstantMatch m_ZeroInt()

Convenience matchers for specific integer values.

BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)

Matches a register negated by a G_SUB.

BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)

Matches a register not-ed by a G_XOR.

OneUse_match< SubPat > m_OneUse(const SubPat &SP)

cst_pred_ty< is_all_ones > m_AllOnes()

Match an integer or vector with all bits set.

class_match< PoisonValue > m_Poison()

Match an arbitrary poison constant.

BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)

class_match< BinaryOperator > m_BinOp()

Match an arbitrary binary operation and ignore it.

auto m_PtrToIntOrAddr(const OpTy &Op)

Matches PtrToInt or PtrToAddr.

m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)

class_match< Constant > m_Constant()

Match an arbitrary Constant and ignore it.

ap_match< APInt > m_APInt(const APInt *&Res)

Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.

BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)

Matches an And with LHS and RHS in either order.

CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)

Matches Trunc.

BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)

ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)

Match APInt while allowing poison in splat vector constants.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)

specific_intval< false > m_SpecificInt(const APInt &V)

Match a specific integer value or vector with all elements equal to the value.

bool match(Val *V, const Pattern &P)
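
A minimal sketch of the match() entry point with two of the matchers listed here; names are illustrative:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

// Recognize "add X, C" for a constant-int (or splat) C, binding X and C.
static bool matchAddOfConstant(llvm::Value *V, llvm::Value *&X,
                               const llvm::APInt *&C) {
  using namespace llvm::PatternMatch;
  return match(V, m_Add(m_Value(X), m_APInt(C)));
}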

bind_ty< Instruction > m_Instruction(Instruction *&I)

Match an instruction, capturing it if we match.

specificval_ty m_Specific(const Value *V)

Match if we have a specific specified value.

ap_match< APFloat > m_APFloat(const APFloat *&Res)

Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat.

OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)

Matches a 'Neg' as 'sub nsw 0, V'.

class_match< ConstantInt > m_ConstantInt()

Match an arbitrary ConstantInt and ignore it.

cst_pred_ty< is_one > m_One()

Match an integer 1 or a vector with all elements equal to 1.

IntrinsicID_match m_Intrinsic()

Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))

ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)

Matches SelectInst.

cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()

Match a floating-point negative zero.

specific_fpval m_SpecificFP(double V)

Match a specific floating point value or vector with all elements equal to the value.

ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)

Match a single index ExtractValue instruction.

BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)

Matches logical shift operations.

match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)

Combine two pattern matchers matching L && R.

MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)

Matches an Xor with LHS and RHS in either order.

deferredval_ty< Value > m_Deferred(Value *const &V)

Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.

match_combine_or< match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > >, OpTy > m_ZExtOrSExtOrSelf(const OpTy &Op)

auto m_LogicalOr()

Matches L || R where L and R are arbitrary values.

TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)

Matches ShuffleVectorInst independently of mask value.

cst_pred_ty< is_strictlypositive > m_StrictlyPositive()

Match an integer or vector of strictly positive values.

ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)

Match Select(C, LHS, RHS) or Select(C, RHS, LHS)

CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)

SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)

CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)

Matches ZExt.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)

OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)

MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)

cst_pred_ty< is_negated_power2 > m_NegatedPower2()

Match an integer or vector negated power-of-2.

match_immconstant_ty m_ImmConstant()

Match an arbitrary immediate Constant and ignore it.

cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)

Match an integer or vector where CheckFn(ele) for each element is true.

m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)

match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)

class_match< UnaryOperator > m_UnOp()

Match an arbitrary unary operation and ignore it.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)

MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)

match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)

Match either "add nsw" or "or disjoint".

class_match< Value > m_Value()

Match an arbitrary value and ignore it.

BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)

Exact_match< T > m_Exact(const T &SubPattern)

FNeg_match< OpTy > m_FNeg(const OpTy &X)

Match 'fneg X' as 'fsub -0.0, X'.

BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)

Matches shift operations.

cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()

Match a floating-point positive zero.

BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)

m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)

auto m_LogicalAnd()

Matches L && R where L and R are arbitrary values.

match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)

m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)

BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)

auto m_Undef()

Match an arbitrary undef constant.

m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)

CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)

Matches SExt.

is_zero m_Zero()

Match any null constant or a vector with all elements equal to 0.

BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)

Matches an Or with LHS and RHS in either order.

match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)

Match either "add nuw" or "or disjoint".

BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)

Matches bitwise logic operations.

m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)

BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)

Matches a Mul with LHS and RHS in either order.

m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)

MatchFunctor< Val, Pattern > match_fn(const Pattern &P)

A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.

MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)

match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)

Combine two pattern matchers matching L || R.

@ SingleThread

Synchronized with respect to signal handlers executing in the same thread.

@ System

Synchronized with respect to all concurrently executing threads.

SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)

Return a range of dbg_assign records for which Inst performs the assignment they encode.

initializer< Ty > init(const Ty &Val)

std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)

Extract a Value from Metadata.

DiagnosticInfoOptimizationBase::Argument NV

friend class Instruction

Iterator for Instructions in a BasicBlock.

This is an optimization pass for GlobalISel generic memory operations.

LLVM_ABI cl::opt< bool > EnableKnowledgeRetention

LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)

unsigned Log2_32_Ceil(uint32_t Value)

Return the ceil log base 2 of the specified value, 32 if the value is zero.

@ NeverOverflows

Never overflows.

@ AlwaysOverflowsHigh

Always overflows in the direction of signed/unsigned max value.

@ AlwaysOverflowsLow

Always overflows in the direction of signed/unsigned min value.

@ MayOverflow

May or may not overflow.

LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)

Given operands for an FMul, fold the result or return null.

LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)

Return true if it is valid to use the assumptions provided by an assume intrinsic, I, at the point in the control-flow identified by the context instruction, CxtI.

LLVM_ABI APInt possiblyDemandedEltsInMask(Value *Mask)

Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y) for each lane which may be active.

LLVM_ABI RetainedKnowledge simplifyRetainedKnowledge(AssumeInst *Assume, RetainedKnowledge RK, AssumptionCache *AC, DominatorTree *DT)

Canonicalize the RetainedKnowledge RK.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)

Return true if this is a call to an allocation function that does not have side effects that we are required to preserve beyond the effect of allocating a new object.

LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)

Try to turn a call to @llvm.objectsize into an integer value of the given Type.

LLVM_ABI Value * getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI)

Gets the alignment argument for an aligned_alloc-like function, using either built-in knowledge based on fuzzy matching or the allocalign attribute.

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

LLVM_ABI RetainedKnowledge getKnowledgeFromOperandInAssume(AssumeInst &Assume, unsigned Idx)

Retrieve the information held by Assume on the operand at index Idx.

LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)

Implements IEEE 754-2019 maximum semantics.

LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)

Given a callsite, callee, and arguments, fold the result or return null.

LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)

Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.

constexpr T alignDown(U Value, V Align, W Skew=0)

Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.

constexpr bool isPowerOf2_64(uint64_t Value)

Return true if the argument is a power of two > 0 (64 bit edition.)

LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)

Return true iff the operand bundles of the provided llvm.assume do not contain any valuable information.

LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)

Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.

LLVM_ABI Value * getSplatValue(const Value *V)

Get splat value if the input is a splat vector or return nullptr.

constexpr T MinAlign(U A, V B)

A and B are either alignments or offsets.

LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)

This extracts the Knowledge from an element of an operand bundle.

auto dyn_cast_or_null(const Y &Val)

Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to infer an alignment for the specified pointer.
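
A minimal sketch of a typical query; the helper name is hypothetical and the optional assumption-cache/dominator-tree arguments are left defaulted:

#include "llvm/IR/DataLayout.h"
#include "llvm/Transforms/Utils/Local.h"

// True when Ptr can be proven at least 16-byte aligned at CtxI.
static bool provablyAligned16(llvm::Value *Ptr, const llvm::DataLayout &DL,
                              const llvm::Instruction *CtxI) {
  return llvm::getKnownAlignment(Ptr, DL, CtxI).value() >= 16;
}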

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)

Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.

LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)

Implements IEEE-754 2008 maxNum semantics.

LLVM_ABI FPClassTest fneg(FPClassTest Mask)

Return the test mask which returns true if the value's sign bit is flipped.

SelectPatternFlavor

Specific patterns of select instructions we can match.

@ SPF_ABS

Absolute value.

@ SPF_NABS

Negated absolute value.

LLVM_ABI Constant * getLosslessUnsignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)

constexpr bool isPowerOf2_32(uint32_t Value)

Return true if the argument is a power of two > 0.
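
A minimal sketch combining this predicate with Log2_32_Ceil (listed earlier); LLVM also provides PowerOf2Ceil for this exact job, so the helper is purely illustrative:

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

// Round N up to the next power of two; assumes 0 < N <= 2^31 so the
// shift cannot overflow.
static uint32_t roundUpToPow2(uint32_t N) {
  assert(N != 0 && N <= (1u << 31));
  return llvm::isPowerOf2_32(N) ? N : (1u << llvm::Log2_32_Ceil(N));
}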

bool isModSet(const ModRefInfo MRI)

void sort(IteratorTy Start, IteratorTy End)

FPClassTest

Floating-point class tests, supported by 'is_fpclass' intrinsic.

APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)

Returns: X * 2^Exp for integral exponents.

LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)

Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
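
A minimal sketch of one common use, with the optional assumption-cache and context-instruction arguments left defaulted; V is assumed to have integer type:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/KnownBits.h"

// Prove that V is even: at least one known trailing zero bit.
static bool isKnownEven(const llvm::Value *V, const llvm::DataLayout &DL) {
  llvm::KnownBits Known(V->getType()->getScalarSizeInBits());
  llvm::computeKnownBits(V, Known, DL);
  return Known.countMinTrailingZeros() >= 1;
}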

LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)

Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out parameter results if we successfully match.

LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)

Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....

LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)

Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.

auto find_if_not(R &&Range, UnaryPredicate P)

LLVM_ABI raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool none_of(R &&Range, UnaryPredicate P)

Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.

bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)

LLVM_ABI Constant * getLosslessSignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)

LLVM_ABI AssumeInst * buildAssumeFromKnowledge(ArrayRef< RetainedKnowledge > Knowledge, Instruction *CtxI, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)

Build and return a new assume created from the provided knowledge if the knowledge in the assume is f...

LLVM_ABI FPClassTest inverse_fabs(FPClassTest Mask)

Return the test mask which returns true after fabs is applied to the value.

LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)

Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.

iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)

Split the specified string over a separator and return a range-compatible iterable over its partitions.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)

Return true if the instruction doesn't potentially cross vector lanes.

LLVM_ABI bool maskIsAllOneOrUndef(Value *Mask)

Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be true or undef.

LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key

LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)

Attempt to constant fold a binary operation with the specified operands.
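
A minimal sketch; the wrapper name is hypothetical, and a nullptr result means the folder could not produce a constant:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"

// Fold "add L, R" at compile time.
static llvm::Constant *foldAdd(llvm::Constant *L, llvm::Constant *R,
                               const llvm::DataLayout &DL) {
  return llvm::ConstantFoldBinaryOpOperands(llvm::Instruction::Add, L, R, DL);
}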

LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)

Return true if the given value is known to be non-zero when defined.

constexpr int PoisonMaskElem

@ Mod

The access may modify the value stored in memory.

LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)

Given operands for the multiplication of a FMA, fold the result or return null.

LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)

Given a constrained FP intrinsic call, tries to compute its simplified version.

LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)

Implements IEEE-754 2008 minNum semantics.

OperandBundleDefT< Value * > OperandBundleDef

LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)

Identifies if the vector form of the intrinsic has a scalar operand.

LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)

Combine constant ranges from computeConstantRange() and computeKnownBits().

bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)

Like isSafeToSpeculativelyExecute, but without using information from the instruction's non-constant operands (useful when those operands are about to be replaced).

ArrayRef(const T &OneElt) -> ArrayRef< T >

LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)

If this is a call to a free function, return the freed operand.

constexpr unsigned BitWidth

LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)

Return true if this is always a dereferenceable pointer.

LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask)

Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be false or undef.

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

bool is_contained(R &&Range, const E &Element)

Returns true if Element is found in Range.

LLVM_ABI std::optional< APInt > getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI, function_ref< const Value *(const Value *)> Mapper=[](const Value *V) { return V;})

Return the size of the requested allocation.

unsigned Log2(Align A)

Returns the log2 of the alignment.

LLVM_ABI bool maskContainsAllOneOrUndef(Value *Mask)

Given a mask vector of i1, return true if any of the elements of this predicate mask are known to be true or undef.

LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)

Return the boolean condition value in the context of the given instruction if it is known based on dominating conditions.

LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)

Implements IEEE 754-2019 minimum semantics.

LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)

Return true if the two given values are negation.

LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)

This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address intrinsics from the specified value.

LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)

Returns true if the given value is known to be non-negative.

LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)

Identify if the intrinsic is trivially vectorizable.

LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)

Return false if we can prove that the specified FP value's sign bit is 0.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.

This struct is a compact representation of a valid (non-zero power of two) alignment.

@ IEEE

IEEE-754 denormal numbers preserved.

bool isNonNegative() const

Returns true if this value is known to be non-negative.

unsigned countMinTrailingZeros() const

Returns the minimum number of trailing zero bits.

unsigned countMaxTrailingZeros() const

Returns the maximum number of trailing zero bits possible.

unsigned countMaxPopulation() const

Returns the maximum number of bits that could be one.

unsigned getBitWidth() const

Get the bit width of this value.

bool isNonZero() const

Returns true if this value is known to be non-zero.

unsigned countMinLeadingZeros() const

Returns the minimum number of leading zero bits.

bool isNegative() const

Returns true if this value is known to be negative.

unsigned countMaxLeadingZeros() const

Returns the maximum number of leading zero bits possible.

unsigned countMinPopulation() const

Returns the number of bits known to be one.

bool isAllOnes() const

Returns true if value is all one bits.

FPClassTest KnownFPClasses

Floating-point classes the value could be one of.

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

Align valueOrOne() const

For convenience, returns a valid alignment or 1 if undefined.

A lightweight accessor for an operand bundle meant to be passed around by value.

StringRef getTagName() const

Return the tag of this operand bundle as a string.

uint32_t getTagID() const

Return the tag of this operand bundle as an integer.

Represents one piece of information held inside an operand bundle of an llvm.assume.

Attribute::AttrKind AttrKind

SelectPatternFlavor Flavor

SimplifyQuery getWithInstruction(const Instruction *I) const