LLVM: lib/Transforms/InstCombine/InstCombineCalls.cpp Source File


#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>


#define DEBUG_TYPE "instcombine"

using namespace llvm;

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window", cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));


/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}


/// Recognize a memcpy/memmove from an otherwise-unused alloca: such a source
/// may be treated as undef.
static bool hasUndefSource(AnyMemTransferInst *MI) {
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src)) {
    if (!Src->hasOneUse())
      return false;
    Src = cast<Instruction>(Src)->getOperand(0);
  }
  return isa<AllocaInst>(Src) && Src->hasOneUse();
}
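
// Simplify memcpy/memmove intrinsics: raise source/destination alignment to
// what is provably known, drop transfers to constant or provably-undef
// memory, and expand a constant 1/2/4/8-byte transfer into a single
// load/store pair (a single load+store also handles the overlapping memmove
// case correctly).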

Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  MaybeAlign CopyDstAlign = MI->getDestAlign();
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  MaybeAlign CopySrcAlign = MI->getSourceAlign();
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, the store must
  // be storing the constant value (else the memory wouldn't be constant), so
  // this is a noop.
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the source is provably undef, the transfer does nothing (unless it is
  // volatile).
  if (hasUndefSource(MI) && !MI->isVolatile()) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the length is a constant, try replacing the transfer with a load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size & (Size - 1)))
    return nullptr; // If not 1/2/4/8 bytes, exit.

  // If this is atomic and the alignment is less than the size, we would
  // introduce an unaligned access that is later expanded to a libcall; there
  // is no evident gain, so bail out.
  if (MI->isAtomic())
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
  IntegerType *IntType = IntegerType::get(MI->getContext(), Size << 3);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA, scope and noalias tags describing our copy.
  AAMDNodes AACopyMD = MI->getAAMetadata().adjustForAccess(Size);

  Value *Src = MI->getArgOperand(1);
  Value *Dest = MI->getArgOperand(0);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(*CopySrcAlign);
  L->setAAMetadata(AACopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(*CopyDstAlign);
  S->setAAMetadata(AACopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // Non-atomic transfers can be volatile.
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (MI->isAtomic()) {
    // Atomic accesses have to be unordered.
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
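
// Simplify memset intrinsics: raise the destination alignment, drop stores to
// constant memory and memsets of undef, and expand a constant-length
// 1/2/4/8-byte memset into a single integer store of the splatted fill byte.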


Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

  // If we have a store to a location which is known constant, the store must
  // be a noop.
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Remove a memset of an undef value.
  if (isa<UndefValue>(MI->getValue())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Extract the length and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  const uint64_t Len = LenC->getLimitedValue();
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = MI->getDestAlign().valueOrOne();

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    // An atomic store with a misaligned pointer is not allowed, so for an
    // atomic memset the alignment must be at least the access size.
    if (MI->isAtomic() && Alignment < Len)
      return nullptr;

    Value *Dest = MI->getDest();

    // Extend the memset value to an integer of the right size.
    Constant *FillVal = ConstantInt::get(
        MI->getContext(), APInt::getSplat(Len * 8, FillC->getValue()));
    StoreInst *S = new StoreInst(FillVal, Dest, false, Alignment);
    S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);
    auto replaceOpForAssignmentMarkers = [FillC, FillVal](auto *DbgAssign) {
      if (llvm::is_contained(DbgAssign->location_ops(), FillC))
        DbgAssign->replaceVariableLocationOp(FillC, FillVal);
    };

    InsertNewInstBefore(S, MI->getIterator());
    if (MI->isAtomic())
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}
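
// A masked load with an all-ones (or undef) mask is a plain load; if the
// address is known dereferenceable, it can become a load + select instead.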


Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment = II.getParamAlign(0).valueOrOne();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(1))) {
    LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                            "unmaskedload");
    L->copyMetadata(II);
    return L;
  }

  // If we can unconditionally load from this address, replace with a
  // load/select idiom.
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getDataLayout(), &II, &AC)) {
    LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                             "unmaskedload");
    LI->copyMetadata(II);
    return Builder.CreateSelect(II.getArgOperand(1), LI, II.getArgOperand(2));
  }

  return nullptr;
}
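
// A masked store with a zero mask is dead, and with an all-ones mask it is a
// plain store; otherwise the known-off lanes are used to simplify operands.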


Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
  Value *StorePtr = II.getArgOperand(1);
  Align Alignment = II.getParamAlign(1).valueOrOne();
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    StoreInst *S =
        new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
    S->copyMetadata(II);
    return S;
  }

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked-off lanes to simplify operands via SimplifyDemandedVectorElts.
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt PoisonElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts,
                                            PoisonElts))
    return replaceOperand(II, 0, V);

  return nullptr;
}
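
// A gather from a splatted pointer with an all-ones mask reloads the same
// address in every lane, so it becomes a scalar load plus a vector splat.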


Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(1));
  if (!ConstMask)
    return nullptr;

  // Vector splat address with known mask -> scalar load.
  // Fold the gather to load the source vector's first lane
  // because it is reloading the same value each time.
  if (ConstMask->isAllOnesValue())
    if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
      auto *VecTy = cast<VectorType>(II.getType());
      const Align Alignment = II.getParamAlign(0).valueOrOne();
      LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                              Alignment, "load.scalar");
      Value *Shuf =
          Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
      return replaceInstUsesWith(II, Shuf);
    }

  return nullptr;
}
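
// A scatter to a splatted pointer stores every lane to the same address, so
// only the last lane's store survives; with a splatted value as well it is a
// single scalar store.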


Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // Vector splat address -> scalar store.
  if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
    // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
    if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
        Align Alignment = II.getParamAlign(1).valueOrOne();
        StoreInst *S = new StoreInst(SplatValue, SplatPtr, false,
                                     Alignment);
        S->copyMetadata(II);
        return S;
    }
    // scatter(vector, splat(ptr), splat(true)) ->
    //     store extract(vector, lastlane), ptr
    if (ConstMask->isAllOnesValue()) {
      Align Alignment = II.getParamAlign(1).valueOrOne();
      ElementCount VF = WideLoadTy->getElementCount();
      Value *Extract =
          Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
      StoreInst *S =
          new StoreInst(Extract, SplatPtr, false, Alignment);
      S->copyMetadata(II);
      return S;
    }
  }
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked-off lanes to simplify operands via SimplifyDemandedVectorElts.
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt PoisonElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts,
                                            PoisonElts))
    return replaceOperand(II, 0, V);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts,
                                            PoisonElts))
    return replaceOperand(II, 1, V);

  return nullptr;
}
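
// launder.invariant.group and strip.invariant.group chains can be collapsed:
// only the outermost launder/strip matters, since it carries the most recent
// information about the presence or absence of invariant.group.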


static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombinerImpl &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());

  return cast<Instruction>(Result);
}
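
// Fold cttz/ctlz: the count of a bit-reversed value is the opposite count,
// constant-operand forms reduce to simple add/sub arithmetic, and known bits
// are used to constant-fold the count or tighten its return-value range.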


static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);

    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
  }

  if (II.getType()->isIntOrIntVectorTy(1)) {
    assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
  }

    II.dropUBImplyingAttrsAndMetadata();
  }

  if (IsTZ) {
      auto *CttzZext =
    }

      Value *ConstCttz =
      return BinaryOperator::CreateAdd(ConstCttz, X);
    }

      Value *ConstCttz =
      return BinaryOperator::CreateSub(ConstCttz, X);
    }

          ConstantInt::get(II.getType(), II.getType()->getScalarSizeInBits());
      return BinaryOperator::CreateSub(Width, X);
    }
  } else {
      Value *ConstCtlz =
      return BinaryOperator::CreateAdd(ConstCtlz, X);
    }

      Value *ConstCtlz =
      return BinaryOperator::CreateSub(ConstCtlz, X);
    }

    Type *Ty = II.getType();
    unsigned BitWidth = Ty->getScalarSizeInBits();
    }
  }

  if (IsTZ)
        ConstantInt::get(R->getType(), R->getType()->getScalarSizeInBits() - 1),
        R);
    return BO;
  }

  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
  }

  }

  if (BitWidth != 1 && II.hasRetAttr(Attribute::Range) &&
      II.getMetadata(LLVMContext::MD_range)) {
    II.addRangeRetAttr(Range);
    return &II;
  }

  return nullptr;
}
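
// Fold ctpop: an input known to be a power-of-2 mask reduces to a shift of
// its single set bit, and known bits tighten the return-value range.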


static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Type *Ty = II.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  Value *Op0 = II.getArgOperand(0);

      X == Y)

  }

  }

  }

  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(
        Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

          Ty);

      II.getRange().value_or(ConstantRange::getFull(BitWidth));

    if (Range != OldRange) {
      II.addRangeRetAttr(Range);
      return &II;
    }
  }

  return nullptr;
}


/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  int Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ArrayRef(Indexes));
}


static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}


/// Remove trivially empty start/end intrinsic ranges, i.e. a start
/// immediately followed by an end (unnecessary pairing).
static bool
removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
                          std::function<bool(const IntrinsicInst &)> IsStart) {
  BasicBlock::iterator BI(EndI), BE(EndI.getParent()->end());
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(BI)) {
      if (I->isDebugOrPseudoInst() ||
          I->getIntrinsicID() == EndI.getIntrinsicID())
        continue;
      if (IsStart(*I)) {
        if (haveSameOperands(EndI, *I, EndI.arg_size())) {
          IC.eraseInstFromFunction(*I);
          IC.eraseInstFromFunction(EndI);
          return true;
        }
        // Skip start intrinsics that don't pair with this end intrinsic.
        continue;
      }
    }
    break;
  }

  return false;
}


Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
  removeTriviallyEmptyRange(I, *this, [&I](const IntrinsicInst &II) {
    return II.getIntrinsicID() == Intrinsic::vastart ||
           (II.getIntrinsicID() == Intrinsic::vacopy &&
            I.getArgOperand(0) != II.getArgOperand(1));
  });
  return nullptr;
}
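
// For commutative calls, canonicalize a constant first operand into the
// second position so later matching only has to look for constants on the
// RHS.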


static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
    return &Call;
  }
  return nullptr;
}


Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  Value *OperationResult = nullptr;

  // See if an assume about the overflow bit lets us replace the intrinsic
  // with the plain (no-wrap) binary operation.
  for (User *U : WO->users()) {
      continue;

    for (auto &AssumeVH : AC.assumptionsFor(U)) {
      if (!AssumeVH)
        continue;
        continue;
                                   true))
        continue;
      Result->takeName(WO);
        Inst->setHasNoSignedWrap();
      else
        Inst->setHasNoUnsignedWrap();
      }
    }
  }

  return nullptr;
}


static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {
  Ty = Ty->getScalarType();
  return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;
}

static bool inputDenormalIsDAZ(const Function &F, const Type *Ty) {
  Ty = Ty->getScalarType();
  return F.getDenormalMode(Ty->getFltSemantics()).inputsAreZero();
}


  switch (static_cast<unsigned>(Mask)) {
    break;
    break;
    break;
    break;
    break;
    break;
    break;
    break;
    break;
    break;
    break;
    break;
  default:
    break;
  }

}


Instruction *InstCombinerImpl::foldIntrinsicIsFPClass(IntrinsicInst &II) {
  Value *Src0 = II.getArgOperand(0);
  Value *Src1 = II.getArgOperand(1);
  const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

  const bool IsStrict =
      II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);

    II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));
  }

  }

  if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    if (OrderedInvertedMask == fcInf)

    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Src0);
    Value *CmpInf = Builder.CreateFCmp(Pred, Fabs, Inf);
  }

      (IsOrdered || IsUnordered) && !IsStrict) {
    Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)
                               : Builder.CreateFCmpOEQ(Src0, Inf);
  }

  if ((OrderedInvertedMask == fcPosInf || OrderedInvertedMask == fcNegInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
        OrderedInvertedMask == fcNegInf);
    Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)
                               : Builder.CreateFCmpONE(Src0, Inf);
  }

  if (Mask == fcNan && !IsStrict) {
  }

  }

  if (!IsStrict && (IsOrdered || IsUnordered) &&
                                 Src0, Zero);
  }

    II.setArgOperand(
    return &II;
  }

  return nullptr;
}


    return false;
    return true;

  return std::nullopt;
}

  if (std::optional<bool> Sign = getKnownSign(Op, SQ))
    return Sign;

  return std::nullopt;
}

/// Return true if two values are known to have the same sign.
static bool signBitMustBeTheSame(Value *Op0, Value *Op1,
                                 const SimplifyQuery &SQ) {
  std::optional<bool> Known1 = getKnownSign(Op1, SQ);
  if (!Known1)
    return false;
  std::optional<bool> Known0 = getKnownSign(Op0, SQ);
  if (!Known0)
    return false;
  return *Known0 == *Known1;
}


/// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0.
static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  // TODO: Match vectors with undef elements, but undef may not propagate.
  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  const APInt *C0, *C1;
    return nullptr;

  // Check for necessary no-wrap and overflow constraints.
  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))
    return nullptr;

  // If the constant difference overflows, instsimplify should have reduced
  // the min/max already.
  bool Overflow;
  APInt CDiff =
      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
  Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
  Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
}
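
// Match a saturating add/sub: a min/max clamp of a widened add/sub to the
// symmetric range [-2^(n-1), 2^(n-1)-1] becomes sadd.sat/ssub.sat at the
// narrower width.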


static Instruction *matchSAddSubSat(IntrinsicInst &MinMax1,
                                    InstCombiner::BuilderTy &Builder) {
  Type *Ty = MinMax1.getType();

  // We are looking for a tree of:
  // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
  BinaryOperator *AddSub;
  const APInt *MinValue, *MaxValue;
    return nullptr;
  } else if (match(&MinMax1,
    return nullptr;
  } else
    return nullptr;

  // Check that the constants clamp a saturate.
  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
    return nullptr;

  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;

    return nullptr;

    return nullptr;

  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;
  else
    return nullptr;

    return nullptr;

  Value *Sat = Builder.CreateIntrinsic(IntrinsicID, NewTy, {AT, BT});
}


/// If this min/max can only produce one of two possible constant values,
/// replace it with a select of those constants.
static Value *foldClampRangeOfTwo(IntrinsicInst *II,
                                  InstCombiner::BuilderTy &Builder) {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  const APInt *C0, *C1;
    return nullptr;

  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
    break;
  case Intrinsic::smin:
    break;
  case Intrinsic::umax:
    break;
  case Intrinsic::umin:
    break;
  default:
  }
    return nullptr;

  Value *Cmp = Builder.CreateICmp(Pred, X, I1);
}


/// Reassociate min/max with a constant operand over an inner min/max with a
/// constant operand: the two constants fold into one.
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  if (!LHS)
    return nullptr;

    return nullptr;

  if (InnerMinMaxID != MinMaxID &&
      !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||
         (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&
    return nullptr;

  Value *CondC = Builder.CreateICmp(Pred, C0, C1);
  Value *NewC = Builder.CreateSelect(CondC, C0, C1);
  return Builder.CreateIntrinsic(InnerMinMaxID, II->getType(),
                                 {LHS->getArgOperand(0), NewC});
}


    return nullptr;

  if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
    return nullptr;

      MinMaxID, II->getType());
  Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
}


/// Reduce a sequence of min/max intrinsics with a common operand.
static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
  // Match three calls to the same min/max intrinsic where the inner call has
  // one use and each outer operand is such an intrinsic call.
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||
      (!LHS->hasOneUse() && !RHS->hasOneUse()))
    return nullptr;

  Value *A = LHS->getArgOperand(0);
  Value *B = LHS->getArgOperand(1);
  Value *C = RHS->getArgOperand(0);
  Value *D = RHS->getArgOperand(1);

  // Look for a common operand.
  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;
  if (LHS->hasOneUse()) {
    // If the LHS is only used in this chain, reuse the RHS min/max because
    // that will eliminate the LHS.
    if (D == A || C == A) {
      // min(min(a, b), min(c, a)) --> min(min(c, a), b)
      // min(min(a, b), min(a, d)) --> min(min(a, d), b)
      MinMaxOp = RHS;
      ThirdOp = B;
    } else if (D == B || C == B) {
      // min(min(a, b), min(c, b)) --> min(min(c, b), a)
      // min(min(a, b), min(b, d)) --> min(min(b, d), a)
      MinMaxOp = RHS;
      ThirdOp = A;
    }
  } else {
    assert(RHS->hasOneUse() && "Expected one-use operand");
    // Reuse the LHS; this will eliminate the RHS.
    if (D == A || D == B) {
      MinMaxOp = LHS;
      ThirdOp = C;
    } else if (C == A || C == B) {
      MinMaxOp = LHS;
      ThirdOp = D;
    }
  }

  if (!MinMaxOp || !ThirdOp)
    return nullptr;

}


      II->getCalledFunction()->isSpeculatable())
    return nullptr;

    return isa<Constant>(Arg.get()) ||
           isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),
                                              Arg.getOperandNo(), nullptr);
  });
  if (!NonConstArg ||
    return nullptr;

    return nullptr;

  Type *SrcTy = X->getType();
  for (Use &Arg : II->args()) {
    else if (match(&Arg,
             X->getType() == SrcTy)

    else
      return nullptr;
    } else
      return nullptr;
  }

  Value *NewIntrinsic =
      Builder.CreateIntrinsic(ResTy, II->getIntrinsicID(), NewArgs, FPI);
}


    return nullptr;

    return match(V, m_OneUse(m_VecReverse(m_Value())));
  }))
    return nullptr;

  for (Use &Arg : II->args()) {
                                           Arg.getOperandNo(), nullptr))
    else
      return nullptr;
  }

      II->getType(), II->getIntrinsicID(), NewArgs, FPI);
  return Builder.CreateVectorReverse(NewIntrinsic);
}


template <Intrinsic::ID IntrID>
  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
                "This helper only supports BSWAP and BITREVERSE intrinsics");

  Value *OldReorderX, *OldReorderY;

  }

    Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);
  }

    Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
  }
  }
  return nullptr;
}


  switch (IID) {
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::maximum:
  case Intrinsic::minimum:
  case Intrinsic::maximumnum:
  case Intrinsic::minimumnum:
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
    return true;
  default:
    return false;
  }
}


  auto IID = II->getIntrinsicID();
    return nullptr;

  auto *InvariantBinaryInst =
  return InvariantBinaryInst;
}


static Value *simplifyReductionOperand(Value *Arg, bool CanReorderLanes) {
  if (!CanReorderLanes)
    return nullptr;

    return V;

    return nullptr;

  int Sz = Mask.size();
  for (int Idx : Mask) {
      return nullptr;
    UsedIndices.set(Idx);
  }

  // Can remove shuffle iff it just shuffled elements with no repeats, undefs,
  // or other changes.
  return UsedIndices.all() ? V : nullptr;
}


template <Intrinsic::ID IntrID>
  static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,
                "This helper only supports cttz and ctlz intrinsics");

  Value *ZeroUndef;
    return nullptr;

  unsigned BitWidth = I1->getType()->getScalarSizeInBits();
  auto LessBitWidth = [BitWidth](auto &C) { return C.ult(BitWidth); };

    return nullptr;

  Type *Ty = I1->getType();
      IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,
      IntrID == Intrinsic::cttz
          ? ConstantInt::get(Ty, 1)
  return Builder.CreateBinaryIntrinsic(
      IntrID, Builder.CreateOr(CtOp, NewConst),
}


  switch (ROp) {
  case Intrinsic::umax:
  case Intrinsic::umin:
    if (HasNUW && LOp == Instruction::Add)
      return true;
    if (HasNUW && LOp == Instruction::Shl)
      return true;
    return false;
  case Intrinsic::smax:
  case Intrinsic::smin:
    return HasNSW && LOp == Instruction::Add;
  default:
    return false;
  }
}


  Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);

  if (!Op0 || !Op1)
    return nullptr;

    return nullptr;

    return nullptr;

    return nullptr;

    if (A == D || B == C)
    else
      return nullptr;
  }

  if (A == C) {
    Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, B, D);
    NewBinop =
  } else if (B == D) {
    Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, A, C);
    NewBinop =
  } else {
    return nullptr;
  }

  return NewBinop;
}
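
/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.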

Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
                           SQ.getWithInstruction(&CI)))
  }

    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II)
    return visitCallBase(CI);

  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
  // instead of in visitCallBase.
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    if (auto NumBytes = MI->getLengthInBytes()) {
      // A transfer of zero bytes is a no-op.
      if (NumBytes->isZero())
        return eraseInstFromFunction(CI);

      // For atomic unordered mem intrinsics, if the length is not positive or
      // not a multiple of the element size, the behavior is undefined.
      if (MI->isAtomic() &&
          (NumBytes->isNegative() ||
           (NumBytes->getZExtValue() % MI->getElementSizeInBytes() != 0))) {
        assert(MI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");
      }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return nullptr;

      if (MTI->getSource() == MTI->getDest())
    }

    auto IsPointerUndefined = [MI](Value *Ptr) {
          MI->getFunction(),
    };
    bool SrcIsUndefined = false;

    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
        return I;
      SrcIsUndefined = IsPointerUndefined(MTI->getRawSource());
        return I;
    }

    // If the memory is undefined, accessing it is UB; assume a zero length.
    if (SrcIsUndefined || IsPointerUndefined(MI->getRawDest())) {
      Builder.CreateAssumption(Builder.CreateIsNull(MI->getLength()));
    }

    // memmove(x,y,n) -> memcpy(x,y,n) when the source is a constant global.
      if (GVSrc->isConstant()) {
            MMI->isAtomic()
                ? Intrinsic::memcpy_element_unordered_atomic
                : Intrinsic::memcpy;
        return II;
      }
    }
  }

  if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
    auto VWidth = IIFVTy->getNumElements();
    APInt PoisonElts(VWidth, 0);
      if (V != II)
      return II;
    }
  }

  if (II->isCommutative()) {
    if (auto Pair = matchSymmetricPair(II->getOperand(0), II->getOperand(1))) {
      return II;
    }

    if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
      return NewCall;
  }

  }


  Intrinsic::ID IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::objectsize: {
                             &InsertedInstructions)) {
      for (Instruction *Inserted : InsertedInstructions)
    }
    return nullptr;
  }
  case Intrinsic::abs: {
    Value *IIOperand = II->getArgOperand(0);
    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

    }

    if (match(IIOperand,
      bool NSW =
          cast<Instruction>(IIOperand)->hasNoSignedWrap() && IntMinIsPoison;
      auto *XY = NSW ? Builder.CreateNSWMul(X, Y) : Builder.CreateMul(X, Y);
    }

    if (std::optional<bool> Known =

      if (!*Known)

      if (IntMinIsPoison)
    }

      Value *NarrowAbs =
          Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
      return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
    }

      return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));

    break;
  }

  case Intrinsic::umin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

      assert(II->getType()->getScalarSizeInBits() != 1 &&
             "Expected simplify of umin with max constant");
      Value *Cmp = Builder.CreateICmpNE(I0, Zero);
    }

    if (Value *FoldedCttz =

    if (Value *FoldedCtlz =

    [[fallthrough]];
  }
  case Intrinsic::umax: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
    }
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
      return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
    }
    }

    auto foldMaxMulShift = [&](Value *A, Value *B) -> Instruction * {
        return nullptr;
      if (C->isZero())
        return nullptr;
        return nullptr;

      Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(X->getType(), 0));
      Value *NewSelect =
          Builder.CreateSelect(Cmp, ConstantInt::get(X->getType(), 1), A);
    };

    if (IID == Intrinsic::umax) {
      if (Instruction *I = foldMaxMulShift(I0, I1))
        return I;
      if (Instruction *I = foldMaxMulShift(I1, I0))
        return I;
    }

    [[fallthrough]];
  }

  case Intrinsic::smax:
  case Intrinsic::smin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
    }

      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
      return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
    }
    }

    const APInt *MinC, *MaxC;
    auto CreateCanonicalClampForm = [&](bool IsSigned) {
      auto MaxIID = IsSigned ? Intrinsic::smax : Intrinsic::umax;
      auto MinIID = IsSigned ? Intrinsic::smin : Intrinsic::umin;
      Value *NewMax = Builder.CreateBinaryIntrinsic(
          MaxIID, X, ConstantInt::get(X->getType(), *MaxC));
          *II, Builder.CreateBinaryIntrinsic(
                   MinIID, NewMax, ConstantInt::get(X->getType(), *MinC)));
    };
    if (IID == Intrinsic::smax &&
      return CreateCanonicalClampForm(true);
    if (IID == Intrinsic::umax &&
      return CreateCanonicalClampForm(false);

    // umin(i1 X, i1 Y) and smax(i1 X, i1 Y) are logical and.
    if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
        II->getType()->isIntOrIntVectorTy(1)) {
      return BinaryOperator::CreateAnd(I0, I1);
    }

    // umax(i1 X, i1 Y) and smin(i1 X, i1 Y) are logical or.
    if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
        II->getType()->isIntOrIntVectorTy(1)) {
      return BinaryOperator::CreateOr(I0, I1);
    }

    if (IID == Intrinsic::smin) {
        Value *Zero = ConstantInt::get(X->getType(), 0);
            CI,
            Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {X, Zero}));
      }
    }


    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
        (I0->hasOneUse() || I1->hasOneUse())) {
        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
      }
    }

      bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
      bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;

      if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
        if (KnownSign == std::nullopt) {
          UseOr = false;
          UseAndN = false;
        } else if (*KnownSign) {
          UseOr ^= true;
          UseAndN ^= true;
        }
      }
      if (UseOr)
        return BinaryOperator::CreateOr(I0, X);
      else if (UseAndN)
        return BinaryOperator::CreateAnd(I0, Builder.CreateNot(X));
    }

        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
      }
      }
      return nullptr;
    };

    if (Instruction *I = moveNotAfterMinMax(I0, I1))
      return I;
    if (Instruction *I = moveNotAfterMinMax(I1, I0))
      return I;

      return I;

    const APInt *RHSC;
      return BinaryOperator::CreateAnd(Builder.CreateBinaryIntrinsic(IID, X, Y),
                                       ConstantInt::get(II->getType(), *RHSC));

      if (I0->hasOneUse() && !I1->hasOneUse())

      bool IntMinIsPoison = isKnownNegation(I0, I1, true);
          Intrinsic::abs, I0,

      if (IID == Intrinsic::smin || IID == Intrinsic::umax)
        Abs = Builder.CreateNeg(Abs, "nabs", IntMinIsPoison);
    }

      return Sel;

      return SAdd;

      return R;

      return NewMinMax;

          I0, IsSigned, SQ.getWithInstruction(II));
      if (LHS_CR.icmp(Pred, *RHSC))
                       ConstantInt::get(II->getType(), *RHSC));
      }
    }

    break;
  }

  case Intrinsic::scmp: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *LHS, *RHS;
          CI,
          Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {LHS, RHS}));
    break;
  }
  case Intrinsic::bitreverse: {
    Value *IIOperand = II->getArgOperand(0);
        X->getType()->isIntOrIntVectorTy(1)) {
      Type *Ty = II->getType();
    }

      return crossLogicOpFold;

    break;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);

      Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
                             ? Instruction::LShr
                             : Instruction::Shl;
      }
    }

    // A single "active" byte can be handled with a shift.
    if (BW - LZ - TZ == 8) {
      assert(LZ != TZ && "active byte cannot be in the middle");
      if (LZ > TZ)
        return BinaryOperator::CreateNUWShl(
            IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));

      return BinaryOperator::CreateExactLShr(
          IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
    }

      unsigned C = X->getType()->getScalarSizeInBits() - BW;
      Value *CV = ConstantInt::get(X->getType(), C);
    }

      return crossLogicOpFold;
    }

                      true))
      return BitOp;
    break;
  }

  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
    break;
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II);
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
    break;
  case Intrinsic::powi:

    // powi(x, -1) is 1/x; powi(x, 2) is x*x.
    if (Power->isMinusOne())
                                 II->getArgOperand(0), II);

    if (Power->equalsInt(2))
                                 II->getArgOperand(0), II);

    if (!Power->getValue()[0]) {

          match(II->getArgOperand(0),
      }
    }
    break;

  case Intrinsic::cttz:
  case Intrinsic::ctlz:
      return I;
    break;

  case Intrinsic::ctpop:
      return I;
    break;

2358

2359 case Intrinsic::fshl:

2360 case Intrinsic::fshr: {

2361 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);

2362 Type *Ty = II->getType();

2363 unsigned BitWidth = Ty->getScalarSizeInBits();

2366

2370 if (!ModuloC)

2371 return nullptr;

2372 if (ModuloC != ShAmtC)

2374

2376 ShAmtC, DL),

2378 "Shift amount expected to be modulo bitwidth");

2379

2380

2381

2382

2383 if (IID == Intrinsic::fshr) {

2384

2386 return nullptr;

2387

2393 }

2394 assert(IID == Intrinsic::fshl &&

2395 "All funnel shifts by simple constants should go left");

2396

2397

2398

2400 return BinaryOperator::CreateShl(Op0, ShAmtC);

2401

2402

2403

2405 return BinaryOperator::CreateLShr(Op1,

2407

2408

2414 }

2417 true))

2418 return BitOp;

2419

2420

2421

2423 const APInt *ShAmtInnerC, *ShAmtOuterC;

2425 m_APInt(ShAmtInnerC))) &&

2426 match(ShAmtC, m_APInt(ShAmtOuterC)) && Op0 == Op1) {

2427 APInt Sum = *ShAmtOuterC + *ShAmtInnerC;

2429 if (Modulo.isZero())

2431 Constant *ModuloC = ConstantInt::get(Ty, Modulo);

2433 {InnerOp, InnerOp, ModuloC});

2434 }

2435 }

2436

2437

2438

2439

2445 Mod, IID == Intrinsic::fshl ? Intrinsic::fshr : Intrinsic::fshl, Ty);

2447 }

2448

2449

2450

2453 Value *Op2 = II->getArgOperand(2);

2455 return BinaryOperator::CreateShl(Op0, And);

2456 }

2457

2458

2460 return &CI;

2461

2462

2463

2464

2466 break;

2470 return &CI;

2471 break;

2472 }

  case Intrinsic::ptrmask: {
    unsigned BitWidth = DL.getPointerTypeSizeInBits(II->getType());
      return II;

    Value *InnerPtr, *InnerMask;

    // ptrmask(ptrmask(p, m1), m2) -> ptrmask(p, m1 & m2)
    if (match(II->getArgOperand(0),
                     m_Value(InnerMask))))) {
      assert(II->getArgOperand(1)->getType() == InnerMask->getType() &&
             "Mask types must match");

      Value *NewMask = Builder.CreateAnd(II->getArgOperand(1), InnerMask);
    }

    if (!CI.hasRetAttr(Attribute::NonNull) &&
    }

    unsigned NewAlignmentLog =

    }
      return &CI;
    break;
  }

  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;

    // Fold a chained with.overflow of two constants into one intrinsic with
    // the constants pre-added, if the inner add cannot overflow.
    const APInt *C0, *C1;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
    bool HasNWAdd = IsSigned
      bool Overflow;
          IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
      if (!Overflow)
            *II, Builder.CreateBinaryIntrinsic(
                     IID, X, ConstantInt::get(Arg1->getType(), NewC)));
    }
    break;
  }

  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::usub_with_overflow:
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;
    break;

  case Intrinsic::ssub_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;

    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // ssub.with.overflow of a constant becomes sadd.with.overflow of the
    // negated constant.
          *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
                                             Arg0, NegVal));
    }

    break;
  }


  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat: {
    Type *Ty = SI->getType();
    Value *Arg0 = SI->getLHS();
    Value *Arg1 = SI->getRHS();

                                                Arg0, Arg1, SI);
    switch (OR) {
      break;
      if (SI->isSigned())
      else
      unsigned BitWidth = Ty->getScalarSizeInBits();
    }
      unsigned BitWidth = Ty->getScalarSizeInBits();
    }
    }

    // usub.sat((sub nuw C, A), C1) -> usub.sat(usub.sat(C, C1), A)
    if (IID == Intrinsic::usub_sat &&
      auto *NewC = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, C, C1);
      auto *NewSub =
          Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, NewC, A);
    }

    // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
    if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
        C->isNotMinSignedValue()) {
          *II, Builder.CreateBinaryIntrinsic(
                   Intrinsic::sadd_sat, Arg0, NegVal));
    }

    // Chained saturating ops with constants fold to one op when the combined
    // constant does not overflow.
    const APInt *Val, *Val2;
      bool IsUnsigned =
          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
      if (Other->getIntrinsicID() == IID &&
        if (IsUnsigned)
          NewVal = Val->uadd_sat(*Val2);
          bool Overflow;
          NewVal = Val->sadd_ov(*Val2, Overflow);
          if (Overflow) {
            break;
          }
        } else {
          break;
        }

          *II, Builder.CreateBinaryIntrinsic(
                   IID, X, ConstantInt::get(II->getType(), NewVal)));
      }
    }
    break;
  }


  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // fmin/fmax of fneg operands becomes fneg of the inverse op.
      switch (IID) {
      case Intrinsic::maxnum:
        NewIID = Intrinsic::minnum;
        break;
      case Intrinsic::minnum:
        NewIID = Intrinsic::maxnum;
        break;
      case Intrinsic::maximum:
        NewIID = Intrinsic::minimum;
        break;
      case Intrinsic::minimum:
        NewIID = Intrinsic::maximum;
        break;
      default:
      }
      Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
      return FNeg;
    }

    // Reassociate nested min/max with constant operands.
    if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
      switch (IID) {
      case Intrinsic::maxnum:
        Res = maxnum(*C1, *C2);
        break;
      case Intrinsic::minnum:
        Res = minnum(*C1, *C2);
        break;
      case Intrinsic::maximum:
        Res = maximum(*C1, *C2);
        break;
      case Intrinsic::minimum:
        Res = minimum(*C1, *C2);
        break;
      default:
      }

          IID, X, ConstantFP::get(Arg0->getType(), Res),
    }
    }

        X->getType() == Y->getType()) {
      Value *NewCall =
          Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName());
      return new FPExtInst(NewCall, II->getType());
    }

    // m(m(X, -X), ...) and m(X, -X) fold to +-fabs(X).
    auto IsMinMaxOrXNegX = [IID, &X](Value *Op0, Value *Op1) {
        return Op0->hasOneUse() ||
               (IID != Intrinsic::minimum && IID != Intrinsic::minnum);
      return false;
    };

    if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {
      Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II);
      if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
        R = Builder.CreateFNegFMF(R, II);
    }

    break;
  }

  case Intrinsic::matrix_multiply: {
      return II;
    }

    Value *Op0 = II->getOperand(0);
    Value *Op1 = II->getOperand(1);
    Value *OpNotNeg, *NegatedOp;
    unsigned NegatedOpArg, OtherOpArg;
      NegatedOp = Op0;
      NegatedOpArg = 0;
      OtherOpArg = 1;
      NegatedOp = Op1;
      NegatedOpArg = 1;
      OtherOpArg = 0;
    } else
      break;

      break;

    Value *OtherOp = II->getOperand(OtherOpArg);
      Value *InverseOtherOp = Builder.CreateFNeg(OtherOp);
      return II;
    }

    NewArgs[NegatedOpArg] = OpNotNeg;
        Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II);
    }
    break;
  }

  case Intrinsic::fmuladd: {
                                    II->getFastMathFlags(), SQ.getWithInstruction(II)))
                                         II->getFastMathFlags());

    [[fallthrough]];
  }
  case Intrinsic::fma: {
    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    Value *Src2 = II->getArgOperand(2);
      return II;
    }

      return II;
    }

                                    SQ.getWithInstruction(II)))

        (match(Src2, m_PosZeroFP()) && II->getFastMathFlags().noSignedZeros()))

    break;
  }

  case Intrinsic::copysign: {
    Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
      if (*KnownSignBit) {
        // copysign with a known-negative sign is -fabs(Mag).
        Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
      }

      // copysign with a known-positive sign is fabs(Mag).
      Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
    }

      Value *CopySign =
    }

      APFloat PosMagC = *MagC;
    }

    break;
  }

  case Intrinsic::fabs: {
    Value *Arg = II->getArgOperand(0);

      CallInst *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II);
    }

      CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});
      CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});
      SI->setFastMathFlags(FMF1 | FMF2);
      return SI;
    }

    }

    Value *Magnitude, *Sign;
    if (match(II->getArgOperand(0),
          Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Magnitude, II);
    }

    [[fallthrough]];
  }
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::trunc: {
      Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
      return new FPExtInst(NarrowII, II->getType());
    }
    break;
  }

  case Intrinsic::cos:
  case Intrinsic::amdgcn_cos: {
    Value *Src = II->getArgOperand(0);

    }
    break;
  }
  case Intrinsic::sin:
  case Intrinsic::amdgcn_sin: {

      Value *NewSin = Builder.CreateUnaryIntrinsic(IID, X, II);
    }
    break;
  }
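  // For ldexp: reassociable nested calls fold to one call with the exponents
  // added, an i1 exponent (zext/sext) becomes a multiply by a select of
  // 2.0/1.0 (or 0.5/1.0), and a select of exponents sinks the select past the
  // ldexp.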

  case Intrinsic::ldexp: {
    Value *Src = II->getArgOperand(0);
    Value *Exp = II->getArgOperand(1);
    Value *InnerSrc;
    Value *InnerExp;
        Exp->getType() == InnerExp->getType()) {

      Value *NewExp = Builder.CreateAdd(InnerExp, Exp);
      II->setArgOperand(1, NewExp);
      II->setFastMathFlags(InnerFlags);
    }
    }

          Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 2.0),
                               ConstantFP::get(II->getType(), 1.0));
    }
          Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 0.5),
                               ConstantFP::get(II->getType(), 1.0));
    }

    Value *SelectCond, *SelectLHS, *SelectRHS;
    if (match(II->getArgOperand(1),
                       m_Value(SelectRHS))))) {
      Value *NewLdexp = nullptr;
        NewLdexp = Builder.CreateLdexp(Src, SelectLHS, II);
        Select = Builder.CreateSelect(SelectCond, NewLdexp, Src);
        NewLdexp = Builder.CreateLdexp(Src, SelectRHS, II);
        Select = Builder.CreateSelect(SelectCond, Src, NewLdexp);
      }

      if (NewLdexp) {
      }
    }

    break;
  }

  case Intrinsic::ptrauth_auth:
  case Intrinsic::ptrauth_resign: {
    // We cannot reason about operand bundles.
    if (II->hasOperandBundles())
      break;

    bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;
    Value *Ptr = II->getArgOperand(0);
    Value *Disc = II->getArgOperand(2);

    Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;

        break;

      if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) {
          break;
      } else if (CI->getIntrinsicID() == Intrinsic::ptrauth_resign) {
          break;
      } else
        break;

      if (!CPA || !CPA->isKnownCompatibleWith(Key, Disc, DL))
        break;

                                 SignDisc, Null,
                                 Null);
      }

      BasePtr = Builder.CreatePtrToInt(CPA->getPointer(), II->getType());
    } else
      break;

    unsigned NewIntrin;
    if (AuthKey && NeedSign) {
      // resign(0,1) + resign(1,2) = resign(0, 2)
      NewIntrin = Intrinsic::ptrauth_resign;
    } else if (AuthKey) {
      // resign(0,1) + auth(1) = auth(0)
      NewIntrin = Intrinsic::ptrauth_auth;
    } else if (NeedSign) {
      // sign(0) + resign(0, 1) = sign(1)
      NewIntrin = Intrinsic::ptrauth_sign;
    } else {
    }

    if (AuthKey) {
    }

    if (NeedSign) {
      CallArgs.push_back(II->getArgOperand(3));
      CallArgs.push_back(II->getArgOperand(4));
    }

  }

  case Intrinsic::arm_neon_vtbl1:
  case Intrinsic::aarch64_neon_tbl1:
    break;

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    }

    bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
                 IID == Intrinsic::aarch64_neon_umull);
      Value *V0 = Builder.CreateIntCast(CV0, NewVT, !Zext);
      Value *V1 = Builder.CreateIntCast(CV1, NewVT, !Zext);
    }

    }

    // A multiply by a splatted 1 is just a widening cast.
      if (Splat->isOne())
        return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                           !Zext);

    break;
  }
  case Intrinsic::arm_neon_aesd:
  case Intrinsic::arm_neon_aese:
  case Intrinsic::aarch64_crypto_aesd:
  case Intrinsic::aarch64_crypto_aese:
  case Intrinsic::aarch64_sve_aesd:
  case Intrinsic::aarch64_sve_aese: {
    Value *DataArg = II->getArgOperand(0);
    Value *KeyArg = II->getArgOperand(1);

      return II;
    }
    break;
  }

  case Intrinsic::hexagon_V6_vandvrt:
  case Intrinsic::hexagon_V6_vandvrt_128B: {
    if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
        ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
      break;
    Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);

      if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
    }
    break;
  }

  case Intrinsic::stackrestore: {
    enum class ClassifyResult {
      None,
      Alloca,
      StackRestore,
      CallWithSideEffects,
    };
        return ClassifyResult::Alloca;

        if (II->getIntrinsicID() == Intrinsic::stackrestore)
          return ClassifyResult::StackRestore;

        if (II->mayHaveSideEffects())
          return ClassifyResult::CallWithSideEffects;
      } else {
        return ClassifyResult::CallWithSideEffects;
      }
      }

      return ClassifyResult::None;
    };

    // If the stacksave and the stackrestore are in the same BB, and there is
    // nothing interesting in between, remove the restore.
      if (SS->getIntrinsicID() == Intrinsic::stacksave &&
          SS->getParent() == II->getParent()) {
        bool CannotRemove = false;
        for (++BI; &*BI != II; ++BI) {
          switch (Classify(&*BI)) {
          case ClassifyResult::None:
            break;

          case ClassifyResult::StackRestore:
            CannotRemove = true;
            break;

          case ClassifyResult::Alloca:
          case ClassifyResult::CallWithSideEffects:
            CannotRemove = true;
            break;
          }
          if (CannotRemove)
            break;
        }

        if (!CannotRemove)
      }
    }

    // A stackrestore followed only by uninteresting instructions up to the
    // block terminator is also removable.
    Instruction *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      switch (Classify(&*BI)) {
      case ClassifyResult::None:
        break;

      case ClassifyResult::StackRestore:

      case ClassifyResult::Alloca:
      case ClassifyResult::CallWithSideEffects:
        CannotRemove = true;
        break;
      }
      if (CannotRemove)
        break;
    }

    break;
  }

  case Intrinsic::lifetime_end:
    // Do not remove lifetime markers when the function is sanitized: the
    // sanitizers rely on them.
    if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
      break;

          return I.getIntrinsicID() == Intrinsic::lifetime_start;
        }))
      return nullptr;
    break;

  case Intrinsic::assume: {
    Value *IIOperand = II->getArgOperand(0);
    SmallVector<OperandBundleDef, 4> OpBundles;
    II->getOperandBundlesAsDefs(OpBundles);

      return nullptr;
    };

      return RemoveConditionFromAssume(Next);

    // assume(a && b) can be split into two independent assumes, and likewise
    // assume(!(a || b)) into assumes of the negated operands.
    FunctionType *AssumeIntrinsicTy = II->getFunctionType();
    Value *AssumeIntrinsic = II->getCalledOperand();
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
                         II->getName());
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
    }

      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                         Builder.CreateNot(A), OpBundles, II->getName());
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                         Builder.CreateNot(B), II->getName());
    }

        LHS->getOpcode() == Instruction::Load &&
        LHS->getType()->isPointerTy() &&
      LHS->setMetadata(LLVMContext::MD_nonnull, MD);
      LHS->setMetadata(LLVMContext::MD_noundef, MD);
      return RemoveConditionFromAssume(II);

    }

    for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {

      if (OBU.getTagName() == "separate_storage") {
        auto MaybeSimplifyHint = [&](const Use &U) {
          Value *Hint = U.get();

        };
        MaybeSimplifyHint(OBU.Inputs[0]);
        MaybeSimplifyHint(OBU.Inputs[1]);
      }

      if (OBU.getTagName() == "align" && OBU.Inputs.size() == 2) {
        if (!RK || RK.AttrKind != Attribute::Alignment ||
          continue;

          continue;

        if ((1ULL << TZ) < RK.ArgValue)
          continue;
      }
    }

    // Convert a nonnull assume (icmp ne ptr, null) into an assume with a
    // "nonnull" operand bundle on the pointer.
        match(IIOperand,
        A->getType()->isPointerTy()) {

      Replacement->insertBefore(Next->getIterator());
      AC.registerAssumption(Replacement);
      return RemoveConditionFromAssume(II);
    }
    }

        match(IIOperand,

      if (auto *Replacement =
        Replacement->insertAfter(II->getIterator());
        AC.registerAssumption(Replacement);
      }
      return RemoveConditionFromAssume(II);
    }
    }
    }

    // Canonicalize knowledge held in operand bundles.
    for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
      auto &BOI = II->bundle_op_info_begin()[Idx];
      if (BOI.End - BOI.Begin > 2)
        continue;

      if (CanonRK == RK)
        continue;
      if (!CanonRK) {
        if (BOI.End - BOI.Begin > 0) {
          Worklist.pushValue(II->op_begin()[BOI.Begin]);
        }
        continue;
      }
      if (BOI.End - BOI.Begin > 0)
        II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
      if (BOI.End - BOI.Begin > 1)
        II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
      return II;
    }
    }

    }

    break;
  }

  case Intrinsic::experimental_guard: {
    // Adjacent guards with the same condition can be merged, widening the
    // first guard over the instruction window in between.
      break;
    }
    Value *NextCond = nullptr;
    if (match(NextInst,
      Value *CurrCond = II->getArgOperand(0);

      if (CurrCond != NextCond) {
        while (MoveI != NextInst) {
          auto *Temp = MoveI;
        }
      }
      return II;
    }
    break;
  }

  case Intrinsic::vector_insert: {
    Value *Vec = II->getArgOperand(0);
    Value *SubVec = II->getArgOperand(1);
    Value *Idx = II->getArgOperand(2);

    // For fixed vectors, fold the insert into shufflevectors.
    if (DstTy && VecTy && SubVecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned SubVecNumElts = SubVecTy->getNumElements();

      if (VecNumElts == SubVecNumElts)

      // Widen the subvector, then blend it into place.
      unsigned i;
      for (i = 0; i != SubVecNumElts; ++i)
      for (; i != VecNumElts; ++i)

      Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);

      for (unsigned i = 0; i != IdxN; ++i)
        Mask.push_back(i);
      for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
        Mask.push_back(i);
      for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
        Mask.push_back(i);

      Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
    }
    break;
  }

  case Intrinsic::vector_extract: {
    Value *Vec = II->getArgOperand(0);
    Value *Idx = II->getArgOperand(1);

    Type *ReturnType = II->getType();

    // Extract from an insert of the same position and type is the inserted
    // value.
    Value *InsertTuple, *InsertIdx, *InsertValue;
        InsertValue->getType() == ReturnType) {

      if (ExtractIdx == Index)

      else
    }

    if (DstTy && VecTy) {
      auto DstEltCnt = DstTy->getElementCount();
      auto VecEltCnt = VecTy->getElementCount();

      // Extracting the entire vector is a no-op.
      if (DstEltCnt == VecTy->getElementCount()) {
      }

      if (VecEltCnt.isScalable() || DstEltCnt.isScalable())
        break;

      for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
        Mask.push_back(IdxN + i);

      Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
    }
    break;
  }

  case Intrinsic::experimental_vp_reverse: {
    Value *Vec = II->getArgOperand(0);
    Value *Mask = II->getArgOperand(1);
      break;
    Value *EVL = II->getArgOperand(2);

          OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(),
          II->getIterator());
    }
    break;
  }

  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_and: {
    // An and/or reduction of an i1 vector is a comparison of the bitcast
    // integer against all-ones/zero.
    Value *Arg = II->getArgOperand(0);

    if (Value *NewOp =
      return II;
    }

    if (FTy->getElementType() == Builder.getInt1Ty()) {
          Vect, Builder.getIntNTy(FTy->getNumElements()));
      if (IID == Intrinsic::vector_reduce_and) {
        Res = Builder.CreateICmpEQ(
      } else {
        assert(IID == Intrinsic::vector_reduce_or &&
               "Expected or reduction.");
        Res = Builder.CreateIsNotNull(Res);
      }
      if (Arg != Vect)
                         II->getType());
    }
    }
    [[fallthrough]];
  }

  case Intrinsic::vector_reduce_add: {
    if (IID == Intrinsic::vector_reduce_add) {
      // An add reduction of an i1 vector is a population count of the
      // bitcast integer (negated for sext inputs).
      Value *Arg = II->getArgOperand(0);

      if (Value *NewOp =
        return II;
      }

      if (FTy->getElementType() == Builder.getInt1Ty()) {
            Vect, Builder.getIntNTy(FTy->getNumElements()));
        Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V);
        if (Res->getType() != II->getType())
          Res = Builder.CreateZExtOrTrunc(Res, II->getType());
        if (Arg != Vect &&
          Res = Builder.CreateNeg(Res);
      }
      }

      // An add reduction of a splat is a multiply by the lane count.
      if (VecToReduceCount.isFixed()) {
        unsigned VectorSize = VecToReduceCount.getFixedValue();
        return BinaryOperator::CreateMul(
            ConstantInt::get(Splat->getType(), VectorSize, false,
                             true));
      }
    }
    }
    [[fallthrough]];
  }

  case Intrinsic::vector_reduce_xor: {
    if (IID == Intrinsic::vector_reduce_xor) {
      Value *Arg = II->getArgOperand(0);

      if (Value *NewOp =
        return II;
      }

      if (VTy->getElementType() == Builder.getInt1Ty()) {
        if (Arg != Vect)
                           II->getType());
      }
      }
    }
    [[fallthrough]];
  }
  case Intrinsic::vector_reduce_mul: {
    if (IID == Intrinsic::vector_reduce_mul) {
      Value *Arg = II->getArgOperand(0);

      if (Value *NewOp =
        return II;
      }

      if (VTy->getElementType() == Builder.getInt1Ty()) {
        if (Res->getType() != II->getType())
          Res = Builder.CreateZExt(Res, II->getType());
      }
      }
    }
    [[fallthrough]];
  }

3882 case Intrinsic::vector_reduce_umin:

3883 case Intrinsic::vector_reduce_umax: {

3884 if (IID == Intrinsic::vector_reduce_umin ||

3885 IID == Intrinsic::vector_reduce_umax) {

3886

3887

3888

3889

3890

3891

3892 Value *Arg = II->getArgOperand(0);

3894

3895 if (Value *NewOp =

3898 return II;

3899 }

3900

3903 if (VTy->getElementType() == Builder.getInt1Ty()) {

3904 Value *Res = IID == Intrinsic::vector_reduce_umin

3905 ? Builder.CreateAndReduce(Vect)

3906 : Builder.CreateOrReduce(Vect);

3907 if (Arg != Vect)
3908 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
3909 II->getType());
3910 return replaceInstUsesWith(CI, Res);
3911 }

3912 }

3913 }

3914 [[fallthrough]];

3915 }

3916 case Intrinsic::vector_reduce_smin:

3917 case Intrinsic::vector_reduce_smax: {

3918 if (IID == Intrinsic::vector_reduce_smin ||

3919 IID == Intrinsic::vector_reduce_smax) {

3920 // SMin/SMax reduction over the vector with (potentially-extended)
3921 // i1 element type is actually a (potentially-extended)
3922 // logical `and`/`or` reduction over the original non-extended value:
3923 //   vector_reduce_s{min,max}(<n x i1>)
3924 //     -->
3925 //   vector_reduce_{or,and}(<n x i1>)
3926 // and
3927 //   vector_reduce_s{min,max}(sext(<n x i1>))
3928 //     -->
3929 //   sext(vector_reduce_{or,and}(<n x i1>))
3930 // and
3931 //   vector_reduce_s{min,max}(zext(<n x i1>))
3932 //     -->
3933 //   zext(vector_reduce_{and,or}(<n x i1>))
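// Why the parity test below picks and/or: under zext, true is 1, so the
// signed minimum is 1 only when every lane is true (an and-reduce); under
// sext, true is -1, so the roles of the and/or reductions swap. This is
// what the (IID == smin) == (ExtOpc == ZExt) comparison encodes.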

3934 Value *Arg = II->getArgOperand(0);

3936

3937 if (Value *NewOp =

3940 return II;

3941 }

3942

3945 if (VTy->getElementType() == Builder.getInt1Ty()) {
3946 Instruction::CastOps ExtOpc = Instruction::CastOps::ZExt;
3947 if (Arg != Vect)
3948 ExtOpc = cast<CastInst>(Arg)->getOpcode();
3949 Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
3950 (ExtOpc == Instruction::CastOps::ZExt))
3951 ? Builder.CreateAndReduce(Vect)
3952 : Builder.CreateOrReduce(Vect);
3953 if (Arg != Vect)
3954 Res = Builder.CreateCast(ExtOpc, Res, II->getType());
3955 return replaceInstUsesWith(CI, Res);
3956 }

3957 }

3958 }

3959 [[fallthrough]];

3960 }

3961 case Intrinsic::vector_reduce_fmax:

3962 case Intrinsic::vector_reduce_fmin:

3963 case Intrinsic::vector_reduce_fadd:

3964 case Intrinsic::vector_reduce_fmul: {

3965 bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&

3966 IID != Intrinsic::vector_reduce_fmul) ||

3967 II->hasAllowReassoc();

3968 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||

3969 IID == Intrinsic::vector_reduce_fmul)

3970 ? 1

3971 : 0;

3972 Value *Arg = II->getArgOperand(ArgIdx);

3974 replaceUse(II->getOperandUse(ArgIdx), NewOp);

3975 return nullptr;

3976 }

3977 break;

3978 }

3979 case Intrinsic::is_fpclass: {

3981 return I;

3982 break;

3983 }

3984 case Intrinsic::threadlocal_address: {

3989 return II;

3990 }

3991 break;

3992 }

3993 case Intrinsic::frexp: {

3995

3996

3997

4000 X = Builder.CreateInsertValue(

4002 1);

4004 }

4005 }

4006 break;

4007 }

4008 case Intrinsic::get_active_lane_mask: {
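// When both operands are constant, the mask only depends on the lane count
// C1 - C0 (saturating at zero), so the call is canonicalized below to
// get_active_lane_mask(0, usub_sat(C1, C0)).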

4009 const APInt *Op0, *Op1;

4012 Type *OpTy = II->getOperand(0)->getType();

4015 II->getType(), Intrinsic::get_active_lane_mask,

4016 {Constant::getNullValue(OpTy),

4017 ConstantInt::get(OpTy, Op1->usub_sat(*Op0))}));

4018 }

4019 break;

4020 }

4021 case Intrinsic::experimental_get_vector_length: {
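// experimental.get.vector.length(%cnt, VF, scalable) never exceeds %cnt; if
// %cnt is provably within the maximum lane count per step (MaxLanes, widened
// by the vscale range when scalable), the result is exactly %cnt, so the
// call is replaced by a zext/trunc of its first operand.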

4022

4024 std::max(II->getArgOperand(0)->getType()->getScalarSizeInBits(),

4025 II->getType()->getScalarSizeInBits());

4028 SQ.getWithInstruction(II))

4031 ->getValue()

4034 MaxLanes = MaxLanes.multiply(

4036

4039 *II, Builder.CreateZExtOrTrunc(II->getArgOperand(0), II->getType()));

4040 return nullptr;

4041 }

4042 default: {

4043

4045 if (V)

4046 return *V;

4047 break;

4048 }

4049 }

4050
4051 // Try to fold the intrinsic into its select operands. This is legal
4052 // when:
4053 //  * the intrinsic is speculatable, and
4054 //  * the select condition is not a vector, or the intrinsic does not
4055 //    perform cross-lane operations.
4056

4060 bool IsVectorCond = Sel->getCondition()->getType()->isVectorTy();

4062 continue;

4063

4064

4065 bool SimplifyBothArms =

4066 Op->getType()->isVectorTy() && II->getType()->isVectorTy();

4068 *II, Sel, false, SimplifyBothArms))

4069 return R;

4070 }

4073 return R;

4074 }

4075 }

4076

4077 if (Instruction *Shuf = foldShuffledIntrinsicOperands(II))
4078 return Shuf;

4079
4080 if (Value *Reversed = foldReversedIntrinsicOperands(II))
4081 return replaceInstUsesWith(*II, Reversed);
4082
4085 // Some intrinsics (like experimental_gc_statepoint) can be used in
4086 // invoke context and must still go through the generic call handling,
4087 // so fall through to visitCallBase.

4088 return visitCallBase(*II);

4089}

4090

4091

4094

4095

4098

4099

4100 auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {

4101 auto FI1SyncScope = FI1->getSyncScopeID();
4102 // Consider same scope, where scope is global or single-thread.

4103 if (FI1SyncScope != FI2->getSyncScopeID() ||
4104 (FI1SyncScope != SyncScope::System &&
4105 FI1SyncScope != SyncScope::SingleThread))
4106 return false;
4107
4108 return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
4109 };
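// A fence is removable when an adjacent fence in the same scope is at least
// as strong: the surviving fence already provides every ordering guarantee
// the removed one did.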

4110 if (NFI && isIdenticalOrStrongerFence(NFI, &FI))

4112

4114 if (isIdenticalOrStrongerFence(PFI, &FI))

4116 return nullptr;

4117}

4118

4119

4121 return visitCallBase(II);

4122}

4123

4124

4126 return visitCallBase(CBI);

4127}

4128

4130 if (!CI->hasFnAttr("modular-format"))

4131 return nullptr;

4132

4133 SmallVector<StringRef> Args(
4134 llvm::split(CI->getFnAttr("modular-format").getValueAsString(), ','));
4135

4136 unsigned FirstArgIdx;

4137 [[maybe_unused]] bool Error;

4138 Error = Args[2].getAsInteger(10, FirstArgIdx);

4139 assert(Error && "invalid first arg index");

4140 --FirstArgIdx;
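// The attribute stores a 1-based argument index; the decrement above
// converts it to the 0-based operand numbering used by CallBase.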

4144

4145 if (AllAspects.empty())

4146 return nullptr;

4147

4149 for (StringRef Aspect : AllAspects) {

4150 if (Aspect == "float") {

4154 [](Value *V) { return V->getType()->isFloatingPointTy(); }))

4155 NeededAspects.push_back("float");

4156 } else {

4157

4158 NeededAspects.push_back(Aspect);

4159 }

4160 }

4161

4162 if (NeededAspects.size() == AllAspects.size())

4163 return nullptr;

4164

4169 FnName, Callee->getFunctionType(),

4170 Callee->getAttributes().removeFnAttribute(Ctx, "modular-format"));

4172 New->setCalledFunction(ModularFn);

4173 New->removeFnAttr("modular-format");

4174 B.Insert(New);

4175

4176 const auto ReferenceAspect = [&](StringRef Aspect) {

4178 Name += '_';

4179 Name += Aspect;

4182 B.CreateCall(RelocNoneFn,

4184 };

4185

4187 for (StringRef Request : NeededAspects)

4188 ReferenceAspect(Request);

4189

4190 return New;

4191}

4192

4195

4196

4197

4198

4200 return nullptr;

4201

4202 auto InstCombineRAUW = [this](Instruction *From, Value *With) {

4204 };

4205 auto InstCombineErase = [this](Instruction *I) {

4207 };

4209 InstCombineRAUW, InstCombineErase);

4210 if (Value *With = Simplifier.optimizeCall(CI, Builder)) {

4211 ++NumSimplified;

4213 }

4215 ++NumSimplified;

4217 }

4218

4219 return nullptr;

4220}

4221

4223

4224

4226 if (Underlying != TrampMem &&

4227 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))

4228 return nullptr;
4229 if (!isa<AllocaInst>(Underlying))
4230 return nullptr;

4231
4232 IntrinsicInst *InitTrampoline = nullptr;
4233 for (User *U : TrampMem->users()) {
4234 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4235 if (!II)
4236 return nullptr;

4237 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {

4238 if (InitTrampoline)

4239 // More than one init_trampoline writes to this alloca? Give up.

4240 return nullptr;

4241 InitTrampoline = II;

4242 continue;

4243 }

4244 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)

4245 // Allow any number of calls to adjust_trampoline.

4246 continue;

4247 return nullptr;

4248 }

4249

4250 // No call to init.trampoline found.

4251 if (!InitTrampoline)

4252 return nullptr;

4253

4254 // Check that the alloca is being used in the expected way.

4255 if (InitTrampoline->getOperand(0) != TrampMem)

4256 return nullptr;

4257

4258 return InitTrampoline;

4259}

4260

4262 Value *TrampMem) {

4263 // Visit all the previous instructions in the basic block, and try to
4264 // find an init.trampoline with a direct path to the adjust.trampoline.
4265 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4266 E = AdjustTramp->getParent()->begin();
4267 I != E;) {
4268 Instruction *Inst = &*--I;
4269 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
4270 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
4271 II->getOperand(0) == TrampMem)
4272 return II;
4273 if (Inst->mayWriteToMemory())
4274 return nullptr;

4275 }

4276 return nullptr;

4277}

4278

4279

4280

4281

4283 Callee = Callee->stripPointerCasts();
4284 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4285 if (!AdjustTramp ||
4286 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
4287 return nullptr;
4288
4289 Value *TrampMem = AdjustTramp->getOperand(0);
4290
4291 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
4292 return IT;
4293 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
4294 return IT;
4295 return nullptr;

4296}

4297

4301 if (!IPC || !IPC->isNoopCast(DL))

4302 return nullptr;

4303
4304 auto *II = dyn_cast<IntrinsicInst>(IPC->getOperand(0));
4305 if (!II)
4306 return nullptr;

4307
4308 Intrinsic::ID IIID = II->getIntrinsicID();

4309 if (IIID != Intrinsic::ptrauth_resign && IIID != Intrinsic::ptrauth_sign)

4310 return nullptr;

4311

4312

4313 std::optional PtrAuthBundleOrNone;

4318 PtrAuthBundleOrNone = Bundle;

4319 else

4321 }

4322

4323 if (!PtrAuthBundleOrNone)

4324 return nullptr;
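// The callee is a ptrauth intrinsic and the call carries a "ptrauth"
// bundle. If the intrinsic's outer signing schema matches the bundle, the
// resign case below re-points the bundle at the inner schema, and the sign
// case drops the bundle entirely and calls the raw pointer.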

4325

4326 Value *NewCallee = nullptr;

4327 switch (IIID) {

4328

4329

4330 case Intrinsic::ptrauth_resign: {

4331

4332 if (II->getOperand(3) != PtrAuthBundleOrNone->Inputs[0])

4333 return nullptr;

4334

4335 if (II->getOperand(4) != PtrAuthBundleOrNone->Inputs[1])

4336 return nullptr;

4337

4338

4339

4340 if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])

4341 return nullptr;

4342

4343 Value *NewBundleOps[] = {II->getOperand(1), II->getOperand(2)};

4344 NewBundles.emplace_back("ptrauth", NewBundleOps);

4345 NewCallee = II->getOperand(0);

4346 break;

4347 }

4348

4349

4350

4351

4352 case Intrinsic::ptrauth_sign: {

4353

4354 if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])

4355 return nullptr;

4356

4357 if (II->getOperand(2) != PtrAuthBundleOrNone->Inputs[1])

4358 return nullptr;

4359 NewCallee = II->getOperand(0);

4360 break;

4361 }

4362 default:

4364 }

4365

4366 if (!NewCallee)

4367 return nullptr;

4368

4369 NewCallee = Builder.CreateBitOrPointerCast(NewCallee, Callee->getType());

4372 return NewCall;

4373}

4374

4377 if (!CPA)

4378 return nullptr;

4379

4381

4382 if (!CalleeF)

4383 return nullptr;

4384

4385

4387 if (!PAB)

4388 return nullptr;

4389

4392

4393

4394 if (!CPA->isKnownCompatibleWith(Key, Discriminator, DL))

4395 return nullptr;

4396

4397

4400 return NewCall;

4401}

4402

4403bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,

4405

4406

4407

4408

4410

4413

4416

4417

4422 } else {

4426 }

4427 }

4428

4429

4431 if (!Alignment)

4433

4436 uint64_t AlignmentVal = AlignOpC->getZExtValue();

4439 Align NewAlign = Align(AlignmentVal);

4440 if (NewAlign > ExistingAlign) {

4444 }

4445 }

4446 }

4448}

4449

4450

4453

4454

4455

4456

4457 SmallVector<unsigned, 4> ArgNos;

4458 unsigned ArgNo = 0;

4459

4461 if (V->getType()->isPointerTy()) {

4462

4463

4466 (HasDereferenceable &&

4468 V->getType()->getPointerAddressSpace()))) {

4469 if (Value *Res = simplifyNonNullOperand(V, HasDereferenceable)) {

4472 }

4476 }

4477 }

4478 ArgNo++;

4479 }
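// Every pointer argument proven non-null above had its index collected in
// ArgNos; the corresponding nonnull attributes are added once, in bulk,
// below.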

4480

4481 assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");

4482

4483 if (!ArgNos.empty()) {

4486 AS = AS.addParamAttribute(Ctx, ArgNos,

4490 }

4491

4492

4493

4497 transformConstExprCastCall(Call))

4498 return nullptr;

4499

4500 if (CalleeF) {

4501

4505 << "\n");

4507 return &Call;

4508 }

4509

4510

4511

4512

4518

4519

4520

4524

4525

4530

4531

4532

4536 return nullptr;

4537 }

4538 }

4539

4540

4541

4545

4546

4549

4551

4552 return nullptr;

4553 }

4554

4555

4558 }

4559

4561 return transformCallThroughTrampoline(Call, *II);

4562

4563

4564 if (Instruction *NewCall = foldPtrAuthIntrinsicCallee(Call))

4565 return NewCall;

4566

4567

4568 if (Instruction *NewCall = foldPtrAuthConstantCallee(Call))

4569 return NewCall;

4570

4573 if (IA->canThrow()) {

4574

4575

4578 }

4579 }

4580

4581

4582

4583

4586

4587

4589 }

4590

4594 Type *RetArgTy = ReturnedArg->getType();

4597 Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));

4598 }

4599

4600

4601

4605 }

4606

4607

4608

4612 if (CalleeF) {

4613 ConstantInt *FunctionType = nullptr;

4615

4616 if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type))

4618

4619 if (FunctionType &&

4623 << ": call to " << CalleeF->getName()

4624 << " using a mismatching function pointer type\n";

4625 }

4626 });

4627

4629 }

4630

4633

4634

4636 case Intrinsic::experimental_gc_statepoint: {

4638 SmallPtrSet<Value *, 32> LiveGcValues;

4639 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {

4640 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);

4641

4642

4645 continue;

4646 }

4647

4650

4651

4655 continue;

4656 }

4657

4659

4660

4661

4663

4666 continue;

4667 }

4668

4669

4670 if (!GCR.hasRetAttr(Attribute::NonNull) &&

4674

4675 Worklist.pushUsersToWorkList(GCR);

4676 }

4677 }

4678

4679

4680

4685 }

4686

4687

4688

4689

4690

4691 LiveGcValues.insert(BasePtr);

4692 LiveGcValues.insert(DerivedPtr);

4693 }

4694 std::optional Bundle =

4696 unsigned NumOfGCLives = LiveGcValues.size();

4697 if (!Bundle || NumOfGCLives == Bundle->Inputs.size())

4698 break;

4699

4700 DenseMap<Value *, unsigned> Val2Idx;

4701 std::vector<Value *> NewLiveGc;

4702 for (Value *V : Bundle->Inputs) {
4703 auto [It, Inserted] = Val2Idx.try_emplace(V);
4704 if (!Inserted)

4705 continue;

4706 if (LiveGcValues.count(V)) {

4707 It->second = NewLiveGc.size();

4708 NewLiveGc.push_back(V);

4709 } else

4710 It->second = NumOfGCLives;

4711 }
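// Val2Idx now maps every original gc-live bundle input to its position in
// NewLiveGc, or to NumOfGCLives when the value is no longer live; the
// gc.relocate index operands are rewritten to match below.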

4712

4713 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {

4714 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
4715 Value *BasePtr = GCR.getBasePtr();
4716 assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
4717 "Missed live gc for base pointer");
4718 Type *OpIntTy1 = GCR.getOperand(1)->getType();
4719 GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
4720 Value *DerivedPtr = GCR.getDerivedPtr();
4721 assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
4722 "Missed live gc for derived pointer");
4723 Type *OpIntTy2 = GCR.getOperand(2)->getType();
4724 GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));

4725 }

4726

4729 }

4730 default: { break; }

4731 }

4732

4734}

4735

4736

4737

4738

4739bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {

4742 if (!Callee)

4743 return false;

4744

4746 "CallBr's don't have a single point after a def to insert at");

4747

4748

4749

4750

4751 if (Callee->isDeclaration())

4752 return false;

4753

4754

4755

4756

4757 if (Callee->hasFnAttribute("thunk"))

4758 return false;

4759

4760

4761

4762

4763 if (Callee->hasFnAttribute(Attribute::Naked))

4764 return false;

4765

4766

4767

4768

4769

4771 return false;

4772

4775

4776

4777

4778

4779 FunctionType *FT = Callee->getFunctionType();

4781 Type *NewRetTy = FT->getReturnType();

4782

4783

4784 if (OldRetTy != NewRetTy) {

4785

4787 return false;

4788

4790 if (Caller->use_empty())

4791 return false;

4792 }

4793

4794 if (!CallerPAL.isEmpty() && Caller->use_empty()) {

4795 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());

4796 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(

4797 NewRetTy, CallerPAL.getRetAttrs())))

4798 return false;

4799 }

4800

4801

4802

4803

4804

4805 if (Caller->use_empty()) {

4806 BasicBlock *PhisNotSupportedBlock = nullptr;

4808 PhisNotSupportedBlock = II->getNormalDest();

4809 if (PhisNotSupportedBlock)

4810 for (User *U : Caller->users())

4812 if (PN->getParent() == PhisNotSupportedBlock)

4813 return false;

4814 }

4815 }

4816

4818 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

4819

4820

4821

4822

4823

4824

4825

4826

4827

4828 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||

4829 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))

4830 return false;

4831

4833 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {

4834 Type *ParamTy = FT->getParamType(i);

4835 Type *ActTy = (*AI)->getType();

4836

4838 return false;

4839

4840

4841 if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))

4842 .overlaps(AttributeFuncs::typeIncompatible(

4843 ParamTy, CallerPAL.getParamAttrs(i),

4844 AttributeFuncs::ASK_UNSAFE_TO_DROP)))

4845 return false;

4846

4848 CallerPAL.hasParamAttr(i, Attribute::Preallocated))

4849 return false;

4850

4851 if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))

4852 return false;

4853

4854 if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=

4855 Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))

4856 return false;

4857 }
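// Past this loop, every argument common to the call and the callee can be
// legally converted, and no ABI-sensitive attribute (inalloca, preallocated,
// swifterror, or a byval mismatch) blocks rewriting the call site to match
// the callee's actual signature.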

4858

4859 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&

4860 !CallerPAL.isEmpty()) {

4861

4862

4863

4864 unsigned SRetIdx;

4865 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&

4866 SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())

4867 return false;

4868 }

4869

4870

4871

4872 SmallVector<Value *, 8> Args;

4874 Args.reserve(NumActualArgs);

4875 ArgAttrs.reserve(NumActualArgs);

4876

4877

4878 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());

4879

4880

4881

4882 RAttrs.remove(

4883 AttributeFuncs::typeIncompatible(NewRetTy, CallerPAL.getRetAttrs()));

4884

4887 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {

4888 Type *ParamTy = FT->getParamType(i);

4889

4890 Value *NewArg = *AI;

4891 if ((*AI)->getType() != ParamTy)

4892 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);

4893 Args.push_back(NewArg);

4894

4895

4896

4897 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(

4898 ParamTy, CallerPAL.getParamAttrs(i), AttributeFuncs::ASK_SAFE_TO_DROP);

4900 CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));

4901 }

4902

4903

4904

4905 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {

4907 ArgAttrs.push_back(AttributeSet());

4908 }

4909

4910

4911 if (FT->getNumParams() < NumActualArgs) {

4912

4913 if (FT->isVarArg()) {

4914

4915 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {

4917 Value *NewArg = *AI;

4918 if (PTy != (*AI)->getType()) {

4919

4922 NewArg = Builder.CreateCast(opcode, *AI, PTy);

4923 }

4924 Args.push_back(NewArg);

4925

4926

4927 ArgAttrs.push_back(CallerPAL.getParamAttrs(i));

4928 }

4929 }

4930 }

4931

4932 AttributeSet FnAttrs = CallerPAL.getFnAttrs();

4933

4935 Caller->setName("");

4936

4937 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&

4938 "missing argument attributes");

4939 AttributeList NewCallerPAL = AttributeList::get(

4941

4944

4945 CallBase *NewCall;

4947 NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),

4948 II->getUnwindDest(), Args, OpBundles);

4949 } else {

4950 NewCall = Builder.CreateCall(Callee, Args, OpBundles);

4953 }

4957

4958

4959 NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

4960

4961

4964 if (OldRetTy != NV->getType() && Caller->use_empty()) {

4965 assert(NV->getType()->isVoidTy());

4967 NC->setDebugLoc(Caller->getDebugLoc());

4968

4970 assert(OptInsertPt && "No place to insert cast");

4972 Worklist.pushUsersToWorkList(*Caller);

4973 }

4974

4975 if (Caller->use_empty())

4977 else if (Caller->hasValueHandle()) {

4978 if (OldRetTy == NV->getType())

4980 else

4981

4982

4984 }

4985

4987 return true;

4988}

4989

4990

4991

4993InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,

4997

4998

4999

5000 if (Attrs.hasAttrSomewhere(Attribute::Nest))

5001 return nullptr;

5002

5005

5006 AttributeList NestAttrs = NestF->getAttributes();

5007 if (!NestAttrs.isEmpty()) {

5008 unsigned NestArgNo = 0;

5009 Type *NestTy = nullptr;

5010 AttributeSet NestAttr;

5011

5012

5014 E = NestFTy->param_end();

5015 I != E; ++NestArgNo, ++I) {

5016 AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);

5018

5019 NestTy = *I;

5020 NestAttr = AS;

5021 break;

5022 }

5023 }

5024

5025 if (NestTy) {

5026 std::vector<Value*> NewArgs;

5027 std::vector NewArgAttrs;

5030

5031

5032

5033

5034 {

5035 unsigned ArgNo = 0;

5037 do {

5038 if (ArgNo == NestArgNo) {

5039

5041 if (NestVal->getType() != NestTy)

5042 NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");

5043 NewArgs.push_back(NestVal);

5044 NewArgAttrs.push_back(NestAttr);

5045 }

5046

5047 if (I == E)

5048 break;

5049

5050

5051 NewArgs.push_back(*I);

5052 NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

5053

5054 ++ArgNo;

5055 ++I;

5056 } while (true);

5057 }
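// NewArgs now holds the original arguments with the trampoline's chain
// value spliced in at the nest parameter's position, and NewArgAttrs
// mirrors it entry for entry.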

5058

5059

5060

5061

5062

5063 std::vector<Type*> NewTypes;

5064 NewTypes.reserve(FTy->getNumParams()+1);

5065

5066

5067

5068 {

5069 unsigned ArgNo = 0;

5071 E = FTy->param_end();

5072

5073 do {

5074 if (ArgNo == NestArgNo)

5075

5076 NewTypes.push_back(NestTy);

5077

5078 if (I == E)

5079 break;

5080

5081

5082 NewTypes.push_back(*I);

5083

5084 ++ArgNo;

5085 ++I;

5086 } while (true);

5087 }

5088

5089

5090

5091 FunctionType *NewFTy =

5092 FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg());

5093 AttributeList NewPAL =

5094 AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),

5095 Attrs.getRetAttrs(), NewArgAttrs);

5096

5099

5103 II->getUnwindDest(), NewArgs, OpBundles);

5107 NewCaller =

5109 CBI->getIndirectDests(), NewArgs, OpBundles);

5110 cast(NewCaller)->setCallingConv(CBI->getCallingConv());

5112 } else {

5113 NewCaller = CallInst::Create(NewFTy, NestF, NewArgs, OpBundles);

5119 }

5121

5122 return NewCaller;

5123 }

5124 }

5125

5126

5127

5128

5130 return &Call;

5131}

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

AMDGPU Register Bank Select

This file declares a class to represent arbitrary precision floating point values and provide a varie...

This file implements a class to represent arbitrary precision integral constant values and operations...

This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))

Atomic ordering constants.

This file contains the simple types necessary to represent the attributes associated with functions a...

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

This file contains the declarations for the subclasses of Constant, which represent the different fla...

static SDValue foldBitOrderCrossLogicOp(SDNode *N, SelectionDAG &DAG)

static Type * getPromotedType(Type *Ty)

Return the specified type promoted as it would be to pass though a va_arg area.

Definition InstCombineCalls.cpp:97

static Instruction * createOverflowTuple(IntrinsicInst *II, Value *Result, Constant *Overflow)

Creates a result tuple for an overflow intrinsic II with a given Result and a constant Overflow value...

Definition InstCombineCalls.cpp:851

static IntrinsicInst * findInitTrampolineFromAlloca(Value *TrampMem)

Definition InstCombineCalls.cpp:4222

static bool removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC, std::function< bool(const IntrinsicInst &)> IsStart)

Definition InstCombineCalls.cpp:800

static bool inputDenormalIsDAZ(const Function &F, const Type *Ty)

Definition InstCombineCalls.cpp:904

static Instruction * reassociateMinMaxWithConstantInOperand(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)

If this min/max has a matching min/max operand with a constant, try to push the constant operand into...

Definition InstCombineCalls.cpp:1323

static bool isIdempotentBinaryIntrinsic(Intrinsic::ID IID)

Helper to match idempotent binary intrinsics, namely, intrinsics where f(f(x, y), y) == f(x,...

Definition InstCombineCalls.cpp:1544

static bool signBitMustBeTheSame(Value *Op0, Value *Op1, const SimplifyQuery &SQ)

Return true if two values Op0 and Op1 are known to have the same sign.

Definition InstCombineCalls.cpp:1137

static Value * optimizeModularFormat(CallInst *CI, IRBuilderBase &B)

Definition InstCombineCalls.cpp:4129

static Instruction * moveAddAfterMinMax(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)

Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0.

Definition InstCombineCalls.cpp:1150

static Instruction * simplifyInvariantGroupIntrinsic(IntrinsicInst &II, InstCombinerImpl &IC)

This function transforms launder.invariant.group and strip.invariant.group like: launder(launder(x)) ...

Definition InstCombineCalls.cpp:446

static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E, unsigned NumOperands)

Definition InstCombineCalls.cpp:780

static std::optional< bool > getKnownSign(Value *Op, const SimplifyQuery &SQ)

Definition InstCombineCalls.cpp:1110

static cl::opt< unsigned > GuardWideningWindow("instcombine-guard-widening-window", cl::init(3), cl::desc("How wide an instruction window to bypass looking for " "another guard"))

static bool hasUndefSource(AnyMemTransferInst *MI)

Recognize a memcpy/memmove from a trivially otherwise unused alloca.

Definition InstCombineCalls.cpp:108

static Instruction * factorizeMinMaxTree(IntrinsicInst *II)

Reduce a sequence of min/max intrinsics with a common operand.

Definition InstCombineCalls.cpp:1351

static Value * simplifyNeonTbl1(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)

Convert a table lookup to shufflevector if the mask is constant.

Definition InstCombineCalls.cpp:744

static Instruction * foldClampRangeOfTwo(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)

If we have a clamp pattern like max (min X, 42), 41 – where the output can only be one of two possibl...

Definition InstCombineCalls.cpp:1249

static Value * simplifyReductionOperand(Value *Arg, bool CanReorderLanes)

Definition InstCombineCalls.cpp:1587

static IntrinsicInst * findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, Value *TrampMem)

Definition InstCombineCalls.cpp:4261

static Value * foldIntrinsicUsingDistributiveLaws(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)

Definition InstCombineCalls.cpp:1675

static std::optional< bool > getKnownSignOrZero(Value *Op, const SimplifyQuery &SQ)

Definition InstCombineCalls.cpp:1124

static Value * foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1, const DataLayout &DL, InstCombiner::BuilderTy &Builder)

Fold an unsigned minimum of trailing or leading zero bits counts: umin(cttz(CtOp, ZeroUndef),...

Definition InstCombineCalls.cpp:1620

static Value * foldIdempotentBinaryIntrinsicRecurrence(InstCombinerImpl &IC, IntrinsicInst *II)

Attempt to simplify value-accumulating recurrences of kind: umax.acc = phi i8 [ umax,...

Definition InstCombineCalls.cpp:1567

static Instruction * foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC)

Definition InstCombineCalls.cpp:652

static Instruction * foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC)

Definition InstCombineCalls.cpp:476

static IntrinsicInst * findInitTrampoline(Value *Callee)

Definition InstCombineCalls.cpp:4282

static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask, const Function &F, Type *Ty)

Definition InstCombineCalls.cpp:912

static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)

Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".

Definition InstCombineCalls.cpp:1653

static Value * reassociateMinMaxWithConstants(IntrinsicInst *II, IRBuilderBase &Builder, const SimplifyQuery &SQ)

If this min/max has a constant operand and an operand that is a matching min/max with a constant oper...

Definition InstCombineCalls.cpp:1289

static CallInst * canonicalizeConstantArg0ToArg1(CallInst &Call)

Definition InstCombineCalls.cpp:838

This file provides internal interfaces used to implement the InstCombine.

This file provides the interface for the instcombine pass implementation.

static bool hasNoSignedWrap(BinaryOperator &I)

static bool inputDenormalIsIEEE(DenormalMode Mode)

Return true if it's possible to assume IEEE treatment of input denormals in F for Val.

static const Function * getCalledFunction(const Value *V)

ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))

uint64_t IntrinsicInst * II

if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod

const SmallVectorImpl< MachineOperand > & Cond

This file implements the SmallBitVector class.

This file defines the SmallVector class.

This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...

#define STATISTIC(VARNAME, DESC)

#define DEBUG_WITH_TYPE(TYPE,...)

DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.

static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")

static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")

static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)

Returns the opcode of Values or ~0 if they do not all agree.

Class for arbitrary precision integers.

static APInt getAllOnes(unsigned numBits)

Return an APInt of a specified width with all bits set.

static APInt getSignMask(unsigned BitWidth)

Get the SignMask for a specific bit width.

bool sgt(const APInt &RHS) const

Signed greater than comparison.

LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const

bool ugt(const APInt &RHS) const

Unsigned greater than comparison.

bool isZero() const

Determine if this value is zero, i.e. all bits are clear.

LLVM_ABI APInt urem(const APInt &RHS) const

Unsigned remainder operation.

unsigned getBitWidth() const

Return the number of bits in the APInt.

bool ult(const APInt &RHS) const

Unsigned less than comparison.

LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const

LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const

static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)

Return a value containing V broadcasted over NewLen bits.

static APInt getSignedMinValue(unsigned numBits)

Gets minimum signed value of APInt for a specific bit width.

LLVM_ABI APInt uadd_sat(const APInt &RHS) const

bool isNonNegative() const

Determine if this APInt Value is non-negative (>= 0)

static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)

Constructs an APInt value that has the bottom loBitsSet bits set.

static APInt getZero(unsigned numBits)

Get the '0' value for the specified bit-width.

LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const

static APSInt getMinValue(uint32_t numBits, bool Unsigned)

Return the APSInt representing the minimum integer value with the given bit width and signedness.

static APSInt getMaxValue(uint32_t numBits, bool Unsigned)

Return the APSInt representing the maximum integer value with the given bit width and signedness.

This class represents any memset intrinsic.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

ArrayRef< T > drop_front(size_t N=1) const

Drop the first N elements of the array.

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const

Return true if the attribute exists in this set.

static LLVM_ABI AttributeSet get(LLVMContext &C, const AttrBuilder &B)

static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)

Return a uniquified Attribute object.

static LLVM_ABI Attribute getWithDereferenceableBytes(LLVMContext &Context, uint64_t Bytes)

static LLVM_ABI Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes)

LLVM_ABI StringRef getValueAsString() const

Return the attribute's value as a string.

static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)

Return a uniquified Attribute object that has the specific alignment set.

InstListType::reverse_iterator reverse_iterator

InstListType::iterator iterator

Instruction iterators...

LLVM_ABI bool isSigned() const

Whether the intrinsic is signed or unsigned.

LLVM_ABI Instruction::BinaryOps getBinaryOp() const

Returns the binary operation underlying the intrinsic.

static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")

static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...

static BinaryOperator * CreateNSW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")

static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)

static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)

Construct a binary instruction, given the opcode and the two operands.

static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")

static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")

static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")

static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")

static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

void setCallingConv(CallingConv::ID CC)

MaybeAlign getRetAlign() const

Extract the alignment of the return value.

LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const

Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.

OperandBundleUse getOperandBundleAt(unsigned Index) const

Return the operand bundle at a specific index.

std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const

Return an operand bundle by name, if present.

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signa...

bool isInAllocaArgument(unsigned ArgNo) const

Determine whether this argument is passed in an alloca.

bool hasFnAttr(Attribute::AttrKind Kind) const

Determine whether this call has the given attribute.

bool hasRetAttr(Attribute::AttrKind Kind) const

Determine whether the return value has the given attribute.

unsigned getNumOperandBundles() const

Return the number of operand bundles associated with this User.

uint64_t getParamDereferenceableBytes(unsigned i) const

Extract the number of dereferenceable bytes for a call or parameter (0=unknown).

CallingConv::ID getCallingConv() const

LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

LLVM_ABI bool isIndirectCall() const

Return true if the callsite is an indirect call.

Value * getCalledOperand() const

void setAttributes(AttributeList A)

Set the attributes for this call.

Attribute getFnAttr(StringRef Kind) const

Get the attribute of a given kind for the function.

bool doesNotThrow() const

Determine if the call cannot unwind.

void addRetAttr(Attribute::AttrKind Kind)

Adds the attribute to the return value.

Value * getArgOperand(unsigned i) const

User::op_iterator arg_end()

Return the iterator pointing to the end of the argument list.

bool isConvergent() const

Determine if the invoke is convergent.

FunctionType * getFunctionType() const

LLVM_ABI Intrinsic::ID getIntrinsicID() const

Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...

Value * getReturnedArgOperand() const

If one of the arguments has the 'returned' attribute, returns its operand value.

static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)

Create a clone of CB with a different set of operand bundles and insert it before InsertPt.

iterator_range< User::op_iterator > args()

Iteration adapter for range-for loops.

void setCalledOperand(Value *V)

static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)

Create a clone of CB with operand bundle ID removed.

unsigned arg_size() const

AttributeList getAttributes() const

Return the attributes for this call.

bool hasOperandBundles() const

Return true if this User has any operand bundles.

void setCalledFunction(Function *Fn)

Sets the function called, including updating the function type.

LLVM_ABI Function * getCaller()

Helper to get the caller (the parent function).

CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...

static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)

This class represents a function call, abstracting a target machine's calling convention.

bool isNoTailCall() const

static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

bool isMustTailCall() const

static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)

Returns the opcode necessary to cast Val into Ty using usual casting rules.

static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a ZExt, BitCast, or Trunc for int -> int casts.

static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)

Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.

static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.

static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

@ FCMP_OEQ

0 0 0 1 True if ordered and equal

@ ICMP_SLT

signed less than

@ ICMP_SLE

signed less or equal

@ FCMP_OLT

0 1 0 0 True if ordered and less than

@ FCMP_OGT

0 0 1 0 True if ordered and greater than

@ FCMP_OGE

0 0 1 1 True if ordered and greater than or equal

@ ICMP_UGT

unsigned greater than

@ ICMP_SGT

signed greater than

@ FCMP_ONE

0 1 1 0 True if ordered and operands are unequal

@ FCMP_UEQ

1 0 0 1 True if unordered or equal

@ ICMP_ULT

unsigned less than

@ FCMP_OLE

0 1 0 1 True if ordered and less than or equal

@ FCMP_UNE

1 1 1 0 True if unordered or not equal

@ ICMP_ULE

unsigned less or equal

Predicate getSwappedPredicate() const

For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.

Predicate getNonStrictPredicate() const

For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.

Predicate getUnorderedPredicate() const

static LLVM_ABI ConstantAggregateZero * get(Type *Ty)

static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)

Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.

static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)

static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)

static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)

static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)

This is the shared class of boolean and integer constants.

uint64_t getLimitedValue(uint64_t Limit=~0ULL) const

getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...

static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)

static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)

uint64_t getZExtValue() const

Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...

const APInt & getValue() const

Return the constant as an APInt value reference.

static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)

static LLVM_ABI ConstantPointerNull * get(PointerType *T)

Static factory methods - Return objects of the specified value.

static LLVM_ABI ConstantPtrAuth * get(Constant *Ptr, ConstantInt *Key, ConstantInt *Disc, Constant *AddrDisc, Constant *DeactivationSymbol)

Return a pointer signed with the specified parameters.

This class represents a range of values.

LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const

Return a new range representing the possible values resulting from a multiplication of a value in thi...

LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const

Make this range have the bit width given by BitWidth.

LLVM_ABI bool isFullSet() const

Return true if this set contains all of the elements possible for this data-type.

LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const

Does the predicate Pred hold between ranges this and Other?

LLVM_ABI bool contains(const APInt &Val) const

Return true if the specified value is in the set.

uint32_t getBitWidth() const

Get the bit width of this ConstantRange.

static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)

This is an important base class in LLVM.

static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)

Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...

static LLVM_ABI Constant * getAllOnesValue(Type *Ty)

static LLVM_ABI Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

A parsed version of the target data layout string in and methods for querying it.

Record of a variable value-assignment, aka a non instruction representation of the dbg....

std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)

size_type count(const_arg_type_t< KeyT > Val) const

Return 1 if the specified key is in the map, 0 otherwise.

LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const

Return true if the (end of the) basic block BB dominates the use U.

Lightweight error class with error context and mandatory checking.

static FMFSource intersect(Value *A, Value *B)

Intersect the FMF from two instructions.

This class represents an extension of floating point types.

Convenience struct for specifying and reasoning about fast-math flags.

void setNoSignedZeros(bool B=true)

bool allowReassoc() const

Flag queries.

An instruction for ordering other memory operations.

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this fence instruction.

AtomicOrdering getOrdering() const

Returns the ordering constraint of this fence instruction.

A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...

Class to represent function types.

Type::subtype_iterator param_iterator

static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)

This static method is the primary way of constructing a FunctionType.

bool isConvergent() const

Determine if the call is convergent.

FunctionType * getFunctionType() const

Returns the FunctionType for me.

CallingConv::ID getCallingConv() const

getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...

AttributeList getAttributes() const

Return the attribute list for this Function.

bool doesNotThrow() const

Determine if the function cannot unwind.

bool isIntrinsic() const

isIntrinsic - Returns true if the function's name starts with "llvm.".

LLVM_ABI Value * getBasePtr() const

unsigned getBasePtrIndex() const

The index into the associate statepoint's argument list which contains the base pointer of the pointe...

LLVM_ABI Value * getDerivedPtr() const

unsigned getDerivedPtrIndex() const

The index into the associate statepoint's argument list which contains the pointer whose relocation t...

std::vector< const GCRelocateInst * > getGCRelocates() const

Get list of all gc reloactes linked to this statepoint May contain several relocations for the same b...

MDNode * getMetadata(unsigned KindID) const

Get the current metadata attachments for the given kind, if any.

LLVM_ABI bool isDeclaration() const

Return true if the primary definition of this global value is outside of the current translation unit...

PointerType * getType() const

Global values are always pointers.

Common base class shared among various IRBuilders.

LLVM_ABI Value * CreateLaunderInvariantGroup(Value *Ptr)

Create a launder.invariant.group intrinsic call.

ConstantInt * getTrue()

Get the constant value for i1 true.

LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")

Create a call to intrinsic ID with 2 operands which is mangled on the first type.

LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")

Create a call to intrinsic ID with Args, mangled using Types.

Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)

LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")

Create a call to intrinsic ID with 1 operand which is mangled on its type.

Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)

ConstantInt * getFalse()

Get the constant value for i1 false.

Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")

Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")

LLVM_ABI Value * CreateStripInvariantGroup(Value *Ptr)

Create a strip.invariant.group intrinsic call.

static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const

Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)

Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...

Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override

The specified value produces a vector with any number of elements.

bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override

This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...

Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)

Given an instruction with a select as one operand and a constant as the other operand,...

Instruction * SimplifyAnyMemSet(AnyMemSetInst *MI)

Definition InstCombineCalls.cpp:220

Instruction * visitFree(CallInst &FI, Value *FreedOp)

Instruction * visitCallBrInst(CallBrInst &CBI)

Definition InstCombineCalls.cpp:4125

Instruction * eraseInstFromFunction(Instruction &I) override

Combiner aware instruction erasure.

Value * foldReversedIntrinsicOperands(IntrinsicInst *II)

If all arguments of the intrinsic are reverses, try to pull the reverse after the intrinsic.

Definition InstCombineCalls.cpp:1468

Value * tryGetLog2(Value *Op, bool AssumeNonZero)

Instruction * visitFenceInst(FenceInst &FI)

Definition InstCombineCalls.cpp:4092

Instruction * foldShuffledIntrinsicOperands(IntrinsicInst *II)

If all arguments of the intrinsic are unary shuffles with the same mask, try to shuffle after the int...

Definition InstCombineCalls.cpp:1411

Instruction * visitInvokeInst(InvokeInst &II)

Definition InstCombineCalls.cpp:4120

bool SimplifyDemandedInstructionBits(Instruction &Inst)

Tries to simplify operands to an integer instruction based on its demanded bits.

void CreateNonTerminatorUnreachable(Instruction *InsertAt)

Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...

Instruction * visitVAEndInst(VAEndInst &I)

Definition InstCombineCalls.cpp:827

Instruction * matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps, bool MatchBitReversals)

Given an initial instruction, check to see if it is the root of a bswap/bitreverse idiom.

Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)

Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...

Instruction * visitAllocSite(Instruction &FI)

Instruction * SimplifyAnyMemTransfer(AnyMemTransferInst *MI)

Definition InstCombineCalls.cpp:118

OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const

Instruction * visitCallInst(CallInst &CI)

CallInst simplification.

Definition InstCombineCalls.cpp:1736

unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const

IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy

An IRBuilder that automatically inserts new instructions into the worklist.

bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)

Return true if the specified value is free to invert (apply ~ to).

DominatorTree & getDominatorTree() const

Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)

Inserts an instruction New before instruction Old.

Instruction * replaceInstUsesWith(Instruction &I, Value *V)

A combiner-aware RAUW-like routine.

void replaceUse(Use &U, Value *NewValue)

Replace use and add the previously used value to the worklist.

InstructionWorklist & Worklist

A worklist of the instructions that need to be simplified.

void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const

std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)

Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)

Replace operand of instruction and add old operand to the worklist.

bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const

AssumptionCache & getAssumptionCache() const

OptimizationRemarkEmitter & ORE

Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)

const SimplifyQuery & getSimplifyQuery() const

bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)

LLVM_ABI Instruction * clone() const

Create a copy of 'this' instruction that is identical in all ways except the following:

LLVM_ABI void setHasNoUnsignedWrap(bool b=true)

Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.

LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY

Return true if this instruction may modify memory.

LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)

Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...

LLVM_ABI void setHasNoSignedWrap(bool b=true)

Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

LLVM_ABI const Module * getModule() const

Return the module owning the function this instruction belongs to or nullptr it the function does not...

LLVM_ABI void setAAMetadata(const AAMDNodes &N)

Sets the AA metadata on this instruction from the AAMDNodes structure.

LLVM_ABI void moveBefore(InstListType::iterator InsertPos)

Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...

LLVM_ABI const Function * getFunction() const

Return the function this instruction belongs to.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

bool isTerminator() const

LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)

Set the metadata of the specified kind to the specified node.

LLVM_ABI std::optional< InstListType::iterator > getInsertionPointAfterDef()

Get the first insertion point at which the result of this instruction is defined.

LLVM_ABI bool isIdenticalTo(const Instruction *I) const LLVM_READONLY

Return true if the specified instruction is exactly identical to the current one.

void setDebugLoc(DebugLoc Loc)

Set the debug location information for this instruction.

LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())

Copy metadata from SrcInst to this instruction.

Class to represent integer types.

static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)

This static method is the primary way of constructing an IntegerType.

A wrapper class for inspecting calls to intrinsic functions.

Intrinsic::ID getIntrinsicID() const

Return the intrinsic ID of this intrinsic.

static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)

This is an important class for using LLVM in a threaded context.

An instruction for reading from memory.

static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)

static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)

static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)

Returns the comparison predicate underlying the intrinsic.

ICmpInst::Predicate getPredicate() const

Returns the comparison predicate underlying the intrinsic.

bool isSigned() const

Whether the intrinsic is signed or unsigned.

A Module instance is used to store all the information related to an LLVM module.

StringRef getName() const

Get a short "name" for the module.

unsigned getOpcode() const

Return the opcode for this Instruction or ConstantExpr.

Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.

bool hasNoSignedWrap() const

Test whether this operation is known to never undergo signed overflow, aka the nsw property.

bool hasNoUnsignedWrap() const

Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.

bool isCommutative() const

Return true if the instruction is commutative.

static LLVM_ABI PoisonValue * get(Type *T)

Static factory methods - Return an 'poison' object of the specified type.

Represents a saturating add/sub intrinsic.

This class represents the LLVM 'select' instruction.

static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)

This instruction constructs a fixed permutation of two input vectors.

This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...

bool test(unsigned Idx) const

bool all() const

Returns true if all bits are set.

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...

reference emplace_back(ArgTypes &&... Args)

void reserve(size_type N)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

void setVolatile(bool V)

Specify whether this is a volatile store or not.

void setAlignment(Align Align)

void setOrdering(AtomicOrdering Ordering)

Sets the ordering constraint of this store instruction.

StringRef - Represent a constant reference to a string, i.e.

Class to represent struct types.

static LLVM_ABI bool isCallingConvCCompatible(CallBase *CI)

Returns true if call site / callee has cdecl-compatible calling conventions.

Provides information about what library functions are available for the current target.

This class represents a truncation of integer types.

The instances of the Type class are immutable: once they are created, they are never changed.

static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)

LLVM_ABI unsigned getIntegerBitWidth() const

static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)

bool isPointerTy() const

True if this is an instance of PointerType.

LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const

Return true if this type could be converted with a lossless BitCast to type 'Ty'.

Type * getScalarType() const

If this is a vector type, return the element type, otherwise return 'this'.

bool isStructTy() const

True if this is an instance of StructType.

LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const

Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...

LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY

If this is a vector type, return the getPrimitiveSizeInBits value for the element type.

bool isIntegerTy() const

True if this is an instance of IntegerType.

bool isVoidTy() const

Return true if this is 'void'.

static UnaryOperator * CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)

static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)

static LLVM_ABI UndefValue * get(Type *T)

Static factory methods - Return an 'undef' object of the specified type.

A Use represents the edge between a Value definition and its users.

LLVM_ABI unsigned getOperandNo() const

Return the operand # of this use in its User.

void setOperand(unsigned i, Value *Val)

Value * getOperand(unsigned i) const

This represents the llvm.va_end intrinsic.

static LLVM_ABI void ValueIsDeleted(Value *V)

static LLVM_ABI void ValueIsRAUWd(Value *Old, Value *New)

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

static constexpr uint64_t MaximumAlignment

bool hasOneUse() const

Return true if there is exactly one use of this value.

iterator_range< user_iterator > users()

static LLVM_ABI void dropDroppableUse(Use &U)

Remove the droppable use U.

LLVM_ABI const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

LLVM_ABI LLVMContext & getContext() const

All values hold a context through their type.

static constexpr unsigned MaxAlignmentExponent

The maximum alignment for instructions.

LLVM_ABI StringRef getName() const

Return a constant reference to the value's name.

LLVM_ABI void takeName(Value *V)

Transfer the name from V to this value.
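
A small sketch combining the Value accessors above (V, NewV, and the visit callback are hypothetical):

// Walk all users of V; dyn_cast filters for instruction users.
for (llvm::User *U : V->users())
  if (auto *I = llvm::dyn_cast<llvm::Instruction>(U))
    visit(I);            // hypothetical visitor
if (V->hasOneUse())
  NewV->takeName(V);     // NewV inherits V's name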

Base class of all SIMD vector types.

ElementCount getElementCount() const

Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...

static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)

This static method is the primary way to construct a VectorType.

constexpr ScalarTy getFixedValue() const

static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)

constexpr bool isFixed() const

Returns true if the quantity is not scaled by vscale.
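
A sketch of constructing and querying a fixed-width vector type (Ctx is a hypothetical LLVMContext; ElementCount::getFixed is assumed):

// <4 x i32>: a fixed vector of four 32-bit integers.
auto *VTy = llvm::VectorType::get(llvm::Type::getInt32Ty(Ctx),
                                  llvm::ElementCount::getFixed(4));
llvm::ElementCount EC = VTy->getElementCount();
bool FourLanes = EC.isFixed() && EC.getFixedValue() == 4; // not scaled by vscale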

static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)

const ParentTy * getParent() const

self_iterator getIterator()

NodeTy * getNextNode()

Get the next node, or nullptr for the list tail.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Align[]

Key for Kernel::Arg::Metadata::mAlign.

constexpr char Args[]

Key for Kernel::Metadata::mArgs.

constexpr char Attrs[]

Key for Kernel::Metadata::mAttrs.

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

unsigned ID

LLVM IR allows the use of arbitrary numbers as calling convention identifiers.

@ C

The default llvm calling convention, compatible with C.

@ BasicBlock

Various leaf nodes.

LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})

Look up the Function declaration of the intrinsic id in the Module M.

SpecificConstantMatch m_ZeroInt()

Convenience matchers for specific integer values.

BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)

Matches a register negated by a G_SUB.

BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)

Matches a register not-ed by a G_XOR.

OneUse_match< SubPat > m_OneUse(const SubPat &SP)

cst_pred_ty< is_all_ones > m_AllOnes()

Match an integer or vector with all bits set.

class_match< PoisonValue > m_Poison()

Match an arbitrary poison constant.

BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)

class_match< BinaryOperator > m_BinOp()

Match an arbitrary binary operation and ignore it.

auto m_PtrToIntOrAddr(const OpTy &Op)

Matches PtrToInt or PtrToAddr.

m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)

class_match< Constant > m_Constant()

Match an arbitrary Constant and ignore it.

ap_match< APInt > m_APInt(const APInt *&Res)

Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.

BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)

Matches an And with LHS and RHS in either order.

CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)

Matches Trunc.

BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)

ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)

Match APInt while allowing poison in splat vector constants.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)

specific_intval< false > m_SpecificInt(const APInt &V)

Match a specific integer value or vector with all elements equal to the value.

bool match(Val *V, const Pattern &P)
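
The matchers in this block compose through match(); a representative sketch (V is any llvm::Value *):

using namespace llvm::PatternMatch;
llvm::Value *X;
const llvm::APInt *C;
// Binds X and C when V is "add X, C" with C a ConstantInt or splat vector.
if (match(V, m_Add(m_Value(X), m_APInt(C))))
  ; // ... use X and *C here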

bind_ty< Instruction > m_Instruction(Instruction *&I)

Match an instruction, capturing it if we match.

specificval_ty m_Specific(const Value *V)

Match if we have a specific specified value.

ap_match< APFloat > m_APFloat(const APFloat *&Res)

Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...

OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)

Matches a 'Neg' as 'sub nsw 0, V'.

class_match< ConstantInt > m_ConstantInt()

Match an arbitrary ConstantInt and ignore it.

cst_pred_ty< is_one > m_One()

Match an integer 1 or a vector with all elements equal to 1.

IntrinsicID_match m_Intrinsic()

Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))

ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)

Matches SelectInst.

cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()

Match a floating-point negative zero.

specific_fpval m_SpecificFP(double V)

Match a specific floating point value or vector with all elements equal to the value.

ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)

Match a single index ExtractValue instruction.

BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)

Matches logical shift operations.

match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)

Combine two pattern matchers matching L && R.

MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)

Matches an Xor with LHS and RHS in either order.

deferredval_ty< Value > m_Deferred(Value *const &V)

Like m_Specific(), but works if the specific value to match is determined as part of the same match()...

match_combine_or< match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > >, OpTy > m_ZExtOrSExtOrSelf(const OpTy &Op)

auto m_LogicalOr()

Matches L || R where L and R are arbitrary values.

TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)

Matches ShuffleVectorInst independently of mask value.

cst_pred_ty< is_strictlypositive > m_StrictlyPositive()

Match an integer or vector of strictly positive values.

ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)

Match Select(C, LHS, RHS) or Select(C, RHS, LHS)

CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)

SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)

CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)

Matches ZExt.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)

OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)

MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)

cst_pred_ty< is_negated_power2 > m_NegatedPower2()

Match an integer or vector negated power-of-2.

match_immconstant_ty m_ImmConstant()

Match an arbitrary immediate Constant and ignore it.

cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)

Match an integer or vector where CheckFn(ele) for each element is true.

m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)

match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)

class_match< UnaryOperator > m_UnOp()

Match an arbitrary unary operation and ignore it.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)

MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)

match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)

Match either "add nsw" or "or disjoint".

class_match< Value > m_Value()

Match an arbitrary value and ignore it.

BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)

Exact_match< T > m_Exact(const T &SubPattern)

FNeg_match< OpTy > m_FNeg(const OpTy &X)

Match 'fneg X' as 'fsub -0.0, X'.

BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)

Matches shift operations.

cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()

Match a floating-point positive zero.

BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)

m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)

auto m_LogicalAnd()

Matches L && R where L and R are arbitrary values.

match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)

m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)

BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)

auto m_Undef()

Match an arbitrary undef constant.

m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)

CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)

Matches SExt.

is_zero m_Zero()

Match any null constant or a vector with all elements equal to 0.

BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)

Matches an Or with LHS and RHS in either order.

match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)

Match either "add nuw" or "or disjoint".

BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)

Matches bitwise logic operations.

m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)

BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)

Matches a Mul with LHS and RHS in either order.

m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)

MatchFunctor< Val, Pattern > match_fn(const Pattern &P)

A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.

MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)

match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)

Combine two pattern matchers matching L || R.
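
For instance, a sketch that accepts either extension of an operand:

using namespace llvm::PatternMatch;
llvm::Value *X;
// Matches "zext X" or "sext X", binding X either way.
if (match(V, m_CombineOr(m_ZExt(m_Value(X)), m_SExt(m_Value(X)))))
  ; // ...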

@ SingleThread

Synchronized with respect to signal handlers executing in the same thread.

@ System

Synchronized with respect to all concurrently executing threads.

SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)

Return a range of dbg_assign records for which Inst performs the assignment they encode.

initializer< Ty > init(const Ty &Val)

std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)

Extract a Value from Metadata.

DiagnosticInfoOptimizationBase::Argument NV

friend class Instruction

Iterator for Instructions in a BasicBlock.

This is an optimization pass for GlobalISel generic memory operations.

LLVM_ABI cl::opt< bool > EnableKnowledgeRetention

LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)

unsigned Log2_32_Ceil(uint32_t Value)

Return the ceil log base 2 of the specified value, 32 if the value is zero.

@ NeverOverflows

Never overflows.

@ AlwaysOverflowsHigh

Always overflows in the direction of signed/unsigned max value.

@ AlwaysOverflowsLow

Always overflows in the direction of signed/unsigned min value.

@ MayOverflow

May or may not overflow.

LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)

Given operands for an FMul, fold the result or return null.

LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)

Return true if it is valid to use the assumptions provided by an assume intrinsic,...

LLVM_ABI APInt possiblyDemandedEltsInMask(Value *Mask)

Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y) for each lane which may be active.

LLVM_ABI RetainedKnowledge simplifyRetainedKnowledge(AssumeInst *Assume, RetainedKnowledge RK, AssumptionCache *AC, DominatorTree *DT)

Canonicalize the RetainedKnowledge RK.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)

Return true if this is a call to an allocation function that does not have side effects that we are r...

LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)

Try to turn a call to @llvm.objectsize into an integer value of the given Type.

LLVM_ABI Value * getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI)

Gets the alignment argument for an aligned_alloc-like function, using either built-in knowledge based...

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

LLVM_ABI RetainedKnowledge getKnowledgeFromOperandInAssume(AssumeInst &Assume, unsigned Idx)

Retrieve the information held by Assume on the operand at index Idx.

LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)

Implements IEEE 754-2019 maximum semantics.

LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)

Given a callsite, callee, and arguments, fold the result or return null.

LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)

Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.

constexpr T alignDown(U Value, V Align, W Skew=0)

Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.

constexpr bool isPowerOf2_64(uint64_t Value)

Return true if the argument is a power of two > 0 (64 bit edition.)

LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)

Return true iff the operand bundles of the provided llvm.assume do not contain any valuable information.

LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)

Return true if the instruction does not have any effects besides calculating the result and does not ...

LLVM_ABI Value * getSplatValue(const Value *V)

Get splat value if the input is a splat vector or return nullptr.

constexpr T MinAlign(U A, V B)

A and B are either alignments or offsets.
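
These helpers are constexpr (see the signatures above), so the arithmetic can be checked at compile time; a sketch:

static_assert(llvm::isPowerOf2_32(32u));
static_assert(llvm::alignDown(13u, 8u) == 8u); // round 13 down to a multiple of 8
static_assert(llvm::MinAlign(16, 24) == 8);    // largest power of 2 dividing both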

LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)

This extracts the Knowledge from an element of an operand bundle.

auto dyn_cast_or_null(const Y &Val)

Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to infer an alignment for the specified pointer.

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)

Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...

LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)

Implements IEEE-754 2008 maxNum semantics.

LLVM_ABI FPClassTest fneg(FPClassTest Mask)

Return the test mask which returns true if the value's sign bit is flipped.

SelectPatternFlavor

Specific patterns of select instructions we can match.

@ SPF_ABS

Absolute value.

@ SPF_NABS

Negated absolute value.

LLVM_ABI Constant * getLosslessUnsignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)

constexpr bool isPowerOf2_32(uint32_t Value)

Return true if the argument is a power of two > 0.

bool isModSet(const ModRefInfo MRI)

void sort(IteratorTy Start, IteratorTy End)

FPClassTest

Floating-point class tests, supported by 'is_fpclass' intrinsic.

LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)

Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...

LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)

Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...

LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)

Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....

LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)

Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...

auto find_if_not(R &&Range, UnaryPredicate P)

LLVM_ABI raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool none_of(R &&Range, UnaryPredicate P)

Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.

bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)

LLVM_ABI Constant * getLosslessSignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)

LLVM_ABI AssumeInst * buildAssumeFromKnowledge(ArrayRef< RetainedKnowledge > Knowledge, Instruction *CtxI, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)

Build and return a new assume created from the provided knowledge if the knowledge in the assume is f...

LLVM_ABI FPClassTest inverse_fabs(FPClassTest Mask)

Return the test mask which returns true after fabs is applied to the value.

LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)

Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...

iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)

Split the specified string over a separator and return a range-compatible iterable over its partition...

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)

Return true if the instruction doesn't potentially cross vector lanes.

LLVM_ABI bool maskIsAllOneOrUndef(Value *Mask)

Given a mask vector of i1, Return true if all of the elements of this predicate mask are known to be ...

LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key

LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)

Attempt to constant fold a binary operation with the specified operands.

LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)

Return true if the given value is known to be non-zero when defined.

constexpr int PoisonMaskElem

@ Mod

The access may modify the value stored in memory.

LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)

Given operands for the multiplication of a FMA, fold the result or return null.

LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)

Given a constrained FP intrinsic call, tries to compute its simplified version.

LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)

Implements IEEE-754 2008 minNum semantics.

OperandBundleDefT< Value * > OperandBundleDef

LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)

Identifies if the vector form of the intrinsic has a scalar operand.

LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)

Combine constant ranges from computeConstantRange() and computeKnownBits().

DWARFExpression::Operation Op

bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)

Don't use information from its non-constant operands.

ArrayRef(const T &OneElt) -> ArrayRef< T >

LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)

If this if a call to a free function, return the freed operand.

constexpr unsigned BitWidth

LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)

Return true if this is always a dereferenceable pointer.

LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask)

Given a mask vector of i1, Return true if all of the elements of this predicate mask are known to be ...

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

bool is_contained(R &&Range, const E &Element)

Returns true if Element is found in Range.

LLVM_ABI std::optional< APInt > getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI, function_ref< const Value *(const Value *)> Mapper=[](const Value *V) { return V;})

Return the size of the requested allocation.

unsigned Log2(Align A)

Returns the log2 of the alignment.

LLVM_ABI bool maskContainsAllOneOrUndef(Value *Mask)

Given a mask vector of i1, Return true if any of the elements of this predicate mask are known to be ...

LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)

Return the boolean condition value in the context of the given instruction if it is known based on do...

LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)

Implements IEEE 754-2019 minimum semantics.

LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)

Return true if the two given values are negation.

LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)

This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....

LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)

Returns true if the given value is known to be non-negative.

LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)

Identify if the intrinsic is trivially vectorizable.

LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)

Return false if we can prove that the specified FP value's sign bit is 0.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

A collection of metadata nodes that might be associated with a memory access used by the alias-analys...

This struct is a compact representation of a valid (non-zero power of two) alignment.

@ IEEE

IEEE-754 denormal numbers preserved.

bool isNonNegative() const

Returns true if this value is known to be non-negative.

unsigned countMinTrailingZeros() const

Returns the minimum number of trailing zero bits.

unsigned countMaxTrailingZeros() const

Returns the maximum number of trailing zero bits possible.

unsigned countMaxPopulation() const

Returns the maximum number of bits that could be one.

unsigned getBitWidth() const

Get the bit width of this value.

bool isNonZero() const

Returns true if this value is known to be non-zero.

unsigned countMinLeadingZeros() const

Returns the minimum number of leading zero bits.

bool isNegative() const

Returns true if this value is known to be negative.

unsigned countMaxLeadingZeros() const

Returns the maximum number of leading zero bits possible.

unsigned countMinPopulation() const

Returns the number of bits known to be one.

bool isAllOnes() const

Returns true if value is all one bits.
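
A sketch tying these KnownBits queries to computeKnownBits above (V is a hypothetical integer-typed llvm::Value *, DL the module's DataLayout):

llvm::KnownBits Known(V->getType()->getScalarSizeInBits());
llvm::computeKnownBits(V, Known, DL);
if (Known.isNonNegative() && Known.countMinTrailingZeros() >= 2)
  ; // V is provably a non-negative multiple of 4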

FPClassTest KnownFPClasses

Floating-point classes the value could be one of.

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

Align valueOrOne() const

For convenience, returns a valid alignment or 1 if undefined.
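
A minimal sketch of the distinction between the two alignment structs:

llvm::MaybeAlign MA;             // undefined: no alignment known
llvm::Align A = MA.valueOrOne(); // falls back to Align(1)
llvm::MaybeAlign MB(llvm::Align(16));
assert(MB.valueOrOne() == llvm::Align(16)); // needs <cassert>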

A lightweight accessor for an operand bundle meant to be passed around by value.

StringRef getTagName() const

Return the tag of this operand bundle as a string.

uint32_t getTagID() const

Return the tag of this operand bundle as an integer.

Represent one information held inside an operand bundle of an llvm.assume.

Attribute::AttrKind AttrKind

SelectPatternFlavor Flavor

SimplifyQuery getWithInstruction(const Instruction *I) const