LLVM: lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

14

40

41using namespace llvm;

43

44#define DEBUG_TYPE "aggressive-instcombine"

45

46namespace llvm {

48}

49

50STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded");

52 "Number of guarded rotates transformed into funnel shifts");

54 "Number of guarded funnel shifts transformed into funnel shifts");

55STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized");

56

59 cl::desc("Max number of instructions to scan for aggressive instcombine."));

60

63 cl::desc("The maximum length of a constant string for a builtin string cmp "

64 "call eligible for inlining. The default value is 3."));

65

68 cl::desc("The maximum length of a constant string to "

69 "inline a memchr call."));

70

71

72

73

75 if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)

76 return false;

77

78

79

80

81

82 if (isPowerOf2\_32(I.getType()->getScalarSizeInBits()))

83 return false;

84

85

86

89 unsigned Width = V->getType()->getScalarSizeInBits();

90

91

92

97 return Intrinsic::fshl;

98 }

99

100

101

106 return Intrinsic::fshr;

107 }

108

110 };

111

112

113

114

115

116

118 unsigned FunnelOp = 0, GuardOp = 1;

119 Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);

120 Value *ShVal0, *ShVal1, *ShAmt;

123 (IID == Intrinsic::fshl && ShVal0 != P1) ||

124 (IID == Intrinsic::fshr && ShVal1 != P1)) {

127 (IID == Intrinsic::fshl && ShVal0 != P0) ||

128 (IID == Intrinsic::fshr && ShVal1 != P0))

129 return false;

130 assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&

131 "Pattern must match funnel shift left or right");

133 }

134

135

136

137

138

139 BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp);

140 BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp);

142

143

145 return false;

146

151 return false;

152

154

155 if (ShVal0 == ShVal1)

156 ++NumGuardedRotates;

157 else

158 ++NumGuardedFunnelShifts;

159

160

161

162 bool IsFshl = IID == Intrinsic::fshl;

163 if (ShVal0 != ShVal1) {

165 ShVal1 = Builder.CreateFreeze(ShVal1);

167 ShVal0 = Builder.CreateFreeze(ShVal0);

168 }

169

170

171

172

173

174

175

176

177

178

179

180

181

182

183

184 Phi.replaceAllUsesWith(

185 Builder.CreateIntrinsic(IID, Phi.getType(), {ShVal0, ShVal1, ShAmt}));

186 return true;

187}

188

189

190

191

192

193namespace {

194struct MaskOps {

195 Value *Root = nullptr;

196 APInt Mask;

197 bool MatchAndChain;

198 bool FoundAnd1 = false;

199

200 MaskOps(unsigned BitWidth, bool MatchAnds)

201 : Mask(APInt::getZero(BitWidth)), MatchAndChain(MatchAnds) {}

202};

203}

204

205

206

207

208

209

210

211

213 Value *Op0, *Op1;

214 if (MOps.MatchAndChain) {

215

216

217

219 MOps.FoundAnd1 = true;

221 }

224 } else {

225

228 }

229

230

231

232 Value *Candidate;

233 const APInt *BitIndex = nullptr;

235 Candidate = V;

236

237

238 if (!MOps.Root)

239 MOps.Root = Candidate;

240

241

242 if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))

243 return false;

244

245

247 return MOps.Root == Candidate;

248}

249

250

251

252

253

254

255

256

257

259

260

261 bool MatchAllBitsSet;

263 MatchAllBitsSet = true;

265 MatchAllBitsSet = false;

266 else

267 return false;

268

269 MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);

270 if (MatchAllBitsSet) {

272 return false;

273 } else {

275 return false;

276 }

277

278

279

281 Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);

282 Value *And = Builder.CreateAnd(MOps.Root, Mask);

283 Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)

284 : Builder.CreateIsNotNull(And);

285 Value *Zext = Builder.CreateZExt(Cmp, I.getType());

286 I.replaceAllUsesWith(Zext);

287 ++NumAnyOrAllBitsSet;

288 return true;

289}

290

291

292

293

294

295

296

297

298

299

300

301

303 if (I.getOpcode() != Instruction::LShr)

304 return false;

305

306 Type *Ty = I.getType();

307 if (!Ty->isIntOrIntVectorTy())

308 return false;

309

310 unsigned Len = Ty->getScalarSizeInBits();

311

312 if (!(Len <= 128 && Len > 8 && Len % 8 == 0))

313 return false;

314

320

321 Value *Op0 = I.getOperand(0);

322 Value *Op1 = I.getOperand(1);

324

328

333

334 if (match(ShiftOp0,

338 Value *Root, *SubOp1;

339

340 const APInt *AndMask;

344 auto CheckAndMask = [&]() {

345 if (*AndMask == Mask55)

346 return true;

347

348

349

351 return false;

352

353 APInt NeededMask = Mask55 & ~*AndMask;

355 NeededMask,

357 };

358

359 if (CheckAndMask()) {

360 LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");

362 I.replaceAllUsesWith(

363 Builder.CreateIntrinsic(Intrinsic::ctpop, I.getType(), {Root}));

364 ++NumPopCountRecognized;

365 return true;

366 }

367 }

368 }

369 }

370 }

371

372 return false;

373}

374

375

376

377

378

379

380

381

382

384

386 const APInt *MinC, *MaxC;

393 return false;

394

395

396 if (!(*MinC + 1).isPowerOf2() || -*MaxC != *MinC + 1)

397 return false;

398

399 Type *IntTy = I.getType();

400 Type *FpTy = In->getType();

401 Type *SatTy =

402 IntegerType::get(IntTy->getContext(), (*MinC + 1).exactLogBase2() + 1);

404 SatTy = VectorType::get(SatTy, VecTy->getElementCount());

405

406

407

411 SatCost += TTI.getCastInstrCost(Instruction::SExt, IntTy, SatTy,

414

418 MinMaxCost += TTI.getIntrinsicInstrCost(

421 MinMaxCost += TTI.getIntrinsicInstrCost(

424

425 if (SatCost >= MinMaxCost)

426 return false;

427

430 Builder.CreateIntrinsic(Intrinsic::fptosi_sat, {SatTy, FpTy}, In);

431 I.replaceAllUsesWith(Builder.CreateSExt(Sat, IntTy));

432 return true;

433}

434

435

436

437

441

442

443

444

445

446

448 Value *Arg = Call->getArgOperand(0);

449 if (TTI.haveFastSqrt(Ty) &&

450 (Call->hasNoNaNs() ||

455 Builder.CreateIntrinsic(Intrinsic::sqrt, Ty, Arg, Call, "sqrt");

456 Call->replaceAllUsesWith(NewSqrt);

457

458

459

460 Call->eraseFromParent();

461 return true;

462 }

463

464 return false;

465}

466

467

468

469

471 const APInt &AndMask, Type *AccessTy,

472 unsigned InputBits, const APInt &GEPIdxFactor,

474 for (unsigned Idx = 0; Idx < InputBits; Idx++) {

475 APInt Index = (APInt(InputBits, 1).shl(Idx) * Mul).lshr(Shift) & AndMask;

478 if (C || C->getValue() != Idx)

479 return false;

480 }

481

482 return true;

483}

484

485

486

487

488

489

490

491

492

493

494

495

496

497

498

499

500

501

502

503

504

505

506

507

508

509

510

511

512

513

514

515

516

517

518

519

520

521

522

523

524

525

526

527

528

529

530

531

532

533

534

535

536

537

538

539

540

541

542

543

546 if (!LI)

547 return false;

548

551 return false;

552

554 if (GEP || GEP->hasNoUnsignedSignedWrap())

555 return false;

556

559 return false;

560

561 unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());

562 APInt ModOffset(BW, 0);

564 if (GEP->collectOffset(DL, BW, VarOffsets, ModOffset) ||

565 VarOffsets.size() != 1 || ModOffset != 0)

566 return false;

567 auto [GepIdx, GEPScale] = VarOffsets.front();

568

570 const APInt *MulConst, *ShiftConst, *AndCst = nullptr;

571

572

573

574

575 auto MatchInner = m_LShr(

580 return false;

581

583 if (InputBits != 16 && InputBits != 32 && InputBits != 64 && InputBits != 128)

584 return false;

585

586 if (!GEPScale.isIntN(InputBits) ||

589 InputBits, GEPScale.zextOrTrunc(InputBits), DL))

590 return false;

591

594 bool DefinedForZero = ZeroTableElem->getZExtValue() == InputBits;

595

597 ConstantInt *BoolConst = B.getInt1(!DefinedForZero);

599 auto Cttz = B.CreateIntrinsic(Intrinsic::cttz, {XType}, {X1, BoolConst});

600 Value *ZExtOrTrunc = nullptr;

601

602 if (DefinedForZero) {

603 ZExtOrTrunc = B.CreateZExtOrTrunc(Cttz, AccessType);

604 } else {

605

606

607 auto Cmp = B.CreateICmpEQ(X1, ConstantInt::get(XType, 0));

608 auto Select = B.CreateSelect(Cmp, B.CreateZExt(ZeroTableElem, XType), Cttz);

609

610

613 SelectI->setMetadata(

614 LLVMContext::MD_prof,

616 }

617

618

619

620

621 ZExtOrTrunc = B.CreateZExtOrTrunc(Select, AccessType);

622 }

623

625

626 return true;

627}

628

629

630

631

641

642

643

644

650

651

655 ShAmt2)))))) {

657

658 return false;

659 } else

660 return false;

661

662

669 }

671

672

673 if (LI1 == LI2 || !LI1 || !LI2 || !LI1->isSimple() || !LI2->isSimple() ||

675 return false;

676

677

679 return false;

680

681

682 bool IsBigEndian = DL.isBigEndian();

683

684

686 APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);

687 Load1Ptr =

689 true);

690

692 APInt Offset2(DL.getIndexTypeSizeInBits(Load2Ptr->getType()), 0);

693 Load2Ptr =

695 true);

696

697

700 if (Load1Ptr != Load2Ptr)

701 return false;

702

703

704 if (DL.typeSizeEqualsStoreSize(LI1->getType()) ||

705 DL.typeSizeEqualsStoreSize(LI2->getType()))

706 return false;

707

708

711 if (!Start->comesBefore(End)) {

713

714

715

722 else

724 } else

726 unsigned NumScanned = 0;

728 make_range(Start->getIterator(), End->getIterator())) {

729 if (Inst.mayWriteToMemory() && isModSet(AA.getModRefInfo(&Inst, Loc)))

730 return false;

731

733 return false;

734 }

735

736

738 if (Offset2.slt(Offset1)) {

745 }

746

747

748 if (IsBigEndian)

750

751

752

756 else

758 }

759

760

761

762 uint64_t ShiftDiff = IsBigEndian ? LoadSize2 : LoadSize1;

765 if ((ShAmt2 - ShAmt1) != ShiftDiff || (Offset2 - Offset1) != PrevSize)

766 return false;

767

768

774 }

775 LOps.LoadSize = LoadSize1 + LoadSize2;

777

778

780

781 LOps.Root = LI1;

782 LOps.Shift = ShAmt1;

784 return true;

785}

786

787

788

789

793

795 return false;

796

799 return false;

800

802 LoadInst *NewLoad = nullptr, *LI1 = LOps.Root;

803

805

806 bool Allowed = TTI.isTypeLegal(WiderType);

807 if (!Allowed)

808 return false;

809

810 unsigned AS = LI1->getPointerAddressSpace();

811 unsigned Fast = 0;

812 Allowed = TTI.allowsMisalignedMemoryAccesses(I.getContext(), LOps.LoadSize,

813 AS, LI1->getAlign(), &Fast);

814 if (!Allowed || Fast)

815 return false;

816

817

818 Value *Load1Ptr = LI1->getPointerOperand();

819 Builder.SetInsertPoint(LOps.RootInsert);

821 APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);

823 DL, Offset1, true);

824 Load1Ptr = Builder.CreatePtrAdd(Load1Ptr, Builder.getInt(Offset1));

825 }

826

827 NewLoad = Builder.CreateAlignedLoad(WiderType, Load1Ptr, LI1->getAlign(),

828 LI1->isVolatile(), "");

830

833

834 Value *NewOp = NewLoad;

835

837 NewOp = Builder.CreateZExt(NewOp, LOps.ZextType);

838

839

840

842 NewOp = Builder.CreateShl(NewOp, LOps.Shift);

843 I.replaceAllUsesWith(NewOp);

844

845 return true;

846}

847

848

865

869 if (!Store || !Store->isSimple())

870 return std::nullopt;

871

872 Value *StoredVal = Store->getValueOperand();

874 if (!StoredTy->isIntegerTy() || DL.typeSizeEqualsStoreSize(StoredTy))

875 return std::nullopt;

876

881 return std::nullopt;

882

883 Value *Ptr = Store->getPointerOperand();

884 APInt PtrOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);

886 DL, PtrOffset, true);

887 return {{PtrBase, PtrOffset, Val, ValOffset, ValWidth, Store}};

888}

889

893 if (Parts.size() < 2)

894 return false;

895

896

897

901 unsigned Fast = 0;

902 if (TTI.isTypeLegal(NewTy) ||

903 TTI.allowsMisalignedMemoryAccesses(Ctx, Width,

904 First.Store->getPointerAddressSpace(),

905 First.Store->getAlign(), &Fast) ||

907 return false;

908

909

912 if (First.ValOffset != 0)

913 Val = Builder.CreateLShr(Val, First.ValOffset);

914 Val = Builder.CreateTrunc(Val, NewTy);

915 StoreInst *Store = Builder.CreateAlignedStore(

916 Val, First.Store->getPointerOperand(), First.Store->getAlign());

917

918

925 AATags = AATags.concat(Part.Store->getAAMetadata());

927 DbgLocs.push_back(Part.Store->getDebugLoc());

928 }

929 Store->setAAMetadata(AATags);

930 Store->mergeDIAssignID(Stores);

932

933

934 for (const PartStore &Part : Parts)

935 Part.Store->eraseFromParent();

936

937 return true;

938}

939

942 if (Parts.size() < 2)

943 return false;

944

945

946

947

948

951 int64_t LastEndOffsetFromFirst = 0;

953 for (const PartStore &Part : Parts) {

954 APInt PtrOffsetFromFirst = Part.PtrOffset - First->PtrOffset;

955 int64_t ValOffsetFromFirst = Part.ValOffset - First->ValOffset;

956 if (PtrOffsetFromFirst * 8 != ValOffsetFromFirst ||

957 LastEndOffsetFromFirst != ValOffsetFromFirst) {

959 LastEndOffsetFromFirst, DL, TTI);

961 LastEndOffsetFromFirst = Part.ValWidth;

962 continue;

963 }

964

965 LastEndOffsetFromFirst = ValOffsetFromFirst + Part.ValWidth;

966 }

967

969 LastEndOffsetFromFirst, DL, TTI);

971}

972

975

976 if (DL.isBigEndian())

977 return false;

978

981 bool MadeChange = false;

984 if (Parts.empty() || Part->isCompatibleWith(Parts[0])) {

985 Parts.push_back(std::move(*Part));

986 continue;

987 }

988

991 Parts.push_back(std::move(*Part));

992 continue;

993 }

994

995 if (Parts.empty())

996 continue;

997

998 if (I.mayThrow() ||

999 (I.mayReadOrWriteMemory() &&

1004 continue;

1005 }

1006 }

1007

1009 return MadeChange;

1010}

1011

1012

1013

1016 if (I || I->getOpcode() != Instruction::Or || I->hasOneUse())

1017 return nullptr;

1018

1020

1021

1022

1023 Value *Op0 = I->getOperand(0);

1026 Op0 = A;

1028 Op0 = NOp;

1029

1030 Value *Op1 = I->getOperand(1);

1033 Op1 = A;

1035 Op1 = NOp;

1036

1037 if (Op0 != I->getOperand(0) || Op1 != I->getOperand(1))

1038 return Builder.CreateOr(Op0, Op1);

1039 return nullptr;

1040}

1041

1049 return false;

1050

1051

1052

1054 if (OpI->getOpcode() == Instruction::Or)

1056 return true;

1057

1059

1061 I.replaceAllUsesWith(Builder.CreateICmp(Pred, Res, I.getOperand(1)));

1062 return true;

1063 }

1064

1065 return false;

1066}

1067

1068

1069

1070static std::pair<APInt, APInt>

1072 unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());

1073 std::optional Stride;

1074 APInt ModOffset(BW, 0);

1075

1076

1079 if (GEP->collectOffset(DL, BW, VarOffsets, ModOffset))

1080 break;

1081

1082 for (auto [V, Scale] : VarOffsets) {

1083

1084 if (GEP->hasNoUnsignedSignedWrap())

1086

1087 if (!Stride)

1088 Stride = Scale;

1089 else

1091 }

1092

1093 PtrOp = GEP->getPointerOperand();

1094 }

1095

1096

1097

1099 return {APInt(BW, 1), APInt(BW, 0)};

1100

1101

1102

1103 ModOffset = ModOffset.srem(*Stride);

1105 ModOffset += *Stride;

1106

1107 return {*Stride, ModOffset};

1108}

1109

1110

1111

1114 if (!LI || LI->isVolatile())

1115 return false;

1116

1117

1118

1119 auto *PtrOp = LI->getPointerOperand();

1121 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())

1122 return false;

1123

1124

1125 Constant *C = GV->getInitializer();

1126 uint64_t GVSize = DL.getTypeAllocSize(C->getType());

1127 if (!GVSize || 4096 < GVSize)

1128 return false;

1129

1130 Type *LoadTy = LI->getType();

1131 unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());

1133

1134

1135

1136

1137 if (auto LA = LI->getAlign();

1138 LA <= GV->getAlign().valueOrOne() && Stride.getZExtValue() < LA.value()) {

1139 ConstOffset = APInt(BW, 0);

1140 Stride = APInt(BW, LA.value());

1141 }

1142

1144 if (!Ca)

1145 return false;

1146

1147 unsigned E = GVSize - DL.getTypeStoreSize(LoadTy);

1148 for (; ConstOffset.getZExtValue() <= E; ConstOffset += Stride)

1150 return false;

1151

1152 I.replaceAllUsesWith(Ca);

1153

1154 return true;

1155}

1156

1157namespace {

1158class StrNCmpInliner {

1159public:

1160 StrNCmpInliner(CallInst *CI, LibFunc Func, DomTreeUpdater *DTU,

1161 const DataLayout &DL)

1162 : CI(CI), Func(Func), DTU(DTU), DL(DL) {}

1163

1164 bool optimizeStrNCmp();

1165

1166private:

1167 void inlineCompare(Value *LHS, StringRef RHS, uint64_t N, bool Swapped);

1168

1169 CallInst *CI;

1170 LibFunc Func;

1171 DomTreeUpdater *DTU;

1172 const DataLayout &DL;

1173};

1174

1175}

1176

1177

1178

1179

1180

1181

1182

1183

1184

1185

1186

1187

1188

1189

1190

1191

1192

1193

1194

1195

1196

1197

1198

1199

1200

1201

1202

1203

1204

1205bool StrNCmpInliner::optimizeStrNCmp() {

1207 return false;

1208

1210 return false;

1211

1214

1215 if (Str1P == Str2P)

1216 return false;

1217

1218 StringRef Str1, Str2;

1221 if (HasStr1 == HasStr2)

1222 return false;

1223

1224

1225 StringRef Str = HasStr1 ? Str1 : Str2;

1226 Value *StrP = HasStr1 ? Str2P : Str1P;

1227

1228 size_t Idx = Str.find('\0');

1230 if (Func == LibFunc_strncmp) {

1232 N = std::min(N, ConstInt->getZExtValue());

1233 else

1234 return false;

1235 }

1236

1238 return false;

1239

1240

1241

1242 bool CanBeNull = false, CanBeFreed = false;

1244 return false;

1245 inlineCompare(StrP, Str, N, HasStr1);

1246 return true;

1247}

1248

1249

1250

1251

1252

1253

1254

1255

1256

1257

1258

1259

1260

1261

1262

1263

1264

1265

1266

1267

1268

1269

1270

1271

1272

1273

1274

1275

1276

1277

1278

1279

1280

1281

1282

1283void StrNCmpInliner::inlineCompare(Value *LHS, StringRef RHS, uint64_t N,

1284 bool Swapped) {

1287

1288

1289

1290

1291

1292

1293 B.SetCurrentDebugLocation(CI->getDebugLoc());

1294

1297 SplitBlock(BBCI, CI, DTU, nullptr, nullptr, BBCI->getName() + ".tail");

1298

1300 for (uint64_t I = 0; I < N; ++I)

1304

1306

1307 B.SetInsertPoint(BBNE);

1308 PHINode *Phi = B.CreatePHI(CI->getType(), N);

1309 B.CreateBr(BBTail);

1310

1312 for (uint64_t i = 0; i < N; ++i) {

1313 B.SetInsertPoint(BBSubs[i]);

1315 B.CreateZExt(B.CreateLoad(B.getInt8Ty(),

1316 B.CreateInBoundsPtrAdd(Base, B.getInt64(i))),

1319 ConstantInt::get(CI->getType(), static_cast<unsigned char>(RHS[i]));

1320 Value *Sub = Swapped ? B.CreateSub(VR, VL) : B.CreateSub(VL, VR);

1321 if (i < N - 1) {

1322 BranchInst *CondBrInst = B.CreateCondBr(

1323 B.CreateICmpNE(Sub, ConstantInt::get(CI->getType(), 0)), BBNE,

1324 BBSubs[i + 1]);

1325

1327 assert(F && "Instruction does not belong to a function!");

1328 std::optionalFunction::ProfileCount EC = F->getEntryCount();

1329 if (EC && EC->getCount() > 0)

1331 } else {

1332 B.CreateBr(BBNE);

1333 }

1334

1335 Phi->addIncoming(Sub, BBSubs[i]);

1336 }

1337

1340

1341 if (DTU) {

1343 Updates.push_back({DominatorTree::Insert, BBCI, BBSubs[0]});

1344 for (uint64_t i = 0; i < N; ++i) {

1345 if (i < N - 1)

1346 Updates.push_back({DominatorTree::Insert, BBSubs[i], BBSubs[i + 1]});

1347 Updates.push_back({DominatorTree::Insert, BBSubs[i], BBNE});

1348 }

1349 Updates.push_back({DominatorTree::Insert, BBNE, BBTail});

1350 Updates.push_back({DominatorTree::Delete, BBCI, BBTail});

1352 }

1353}

1354

1355

1359 return false;

1360

1364 return false;

1365

1368 uint64_t Val = ConstInt->getZExtValue();

1369

1370 if (Val > N)

1371 return false;

1372 N = Val;

1373 } else

1374 return false;

1375

1377 return false;

1378

1386 IRB.CreateTrunc(Call->getArgOperand(1), ByteTy), BBNext, N);

1387

1388

1390 Type *IndexTy = DL.getIndexType(Call->getType());

1392

1394 Call->getContext(), "memchr.success", BB->getParent(), BBNext);

1399 if (DTU)

1401

1404 ConstantInt *CaseVal = ConstantInt::get(ByteTy, Str[I]);

1405 if (!Cases.insert(CaseVal).second)

1406 continue;

1407

1410 SI->addCase(CaseVal, BBCase);

1412 IndexPHI->addIncoming(ConstantInt::get(IndexTy, I), BBCase);

1414 if (DTU) {

1417 }

1418 }

1419

1423 PHI->addIncoming(FirstOccursLocation, BBSuccess);

1424

1425 Call->replaceAllUsesWith(PHI);

1426 Call->eraseFromParent();

1427

1428 if (DTU)

1430

1431 return true;

1432}

1433

1437 bool &MadeCFGChange) {

1438

1440 if (!CI || CI->isNoBuiltin())

1441 return false;

1442

1443 Function *CalledFunc = CI->getCalledFunction();

1444 if (!CalledFunc)

1445 return false;

1446

1447 LibFunc LF;

1448 if (!TLI.getLibFunc(*CalledFunc, LF) ||

1450 return false;

1451

1452 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Lazy);

1453

1454 switch (LF) {

1455 case LibFunc_sqrt:

1456 case LibFunc_sqrtf:

1457 case LibFunc_sqrtl:

1458 return foldSqrt(CI, LF, TTI, TLI, AC, DT);

1459 case LibFunc_strcmp:

1460 case LibFunc_strncmp:

1461 if (StrNCmpInliner(CI, LF, &DTU, DL).optimizeStrNCmp()) {

1462 MadeCFGChange = true;

1463 return true;

1464 }

1465 break;

1466 case LibFunc_memchr:

1468 MadeCFGChange = true;

1469 return true;

1470 }

1471 break;

1472 default:;

1473 }

1474 return false;

1475}

1476

1477

1478

1479

1480

1481

1482

1483

1484

1485

1486

1487

1488

1489

1490

1491

1492

1493

1494

1495

1496

1497

1498

1499

1500

1501

1502

1503

1504

1505

1506

1508 Type *Ty = I.getType();

1509 if (!Ty->isIntOrIntVectorTy())

1510 return false;

1511

1512 unsigned BitWidth = Ty->getScalarSizeInBits();

1515 return false;

1516

1517 auto CreateMulHigh = [&](Value *X, Value *Y) {

1520 Value *XExt = Builder.CreateZExt(X, NTy);

1521 Value *YExt = Builder.CreateZExt(Y, NTy);

1522 Value *Mul = Builder.CreateMul(XExt, YExt, "", true);

1524 Value *Res = Builder.CreateTrunc(High, Ty, "", true);

1526 I.replaceAllUsesWith(Res);

1527 LLVM_DEBUG(dbgs() << "Created long multiply from parts of " << *X << " and "

1528 << *Y << "\n");

1529 return true;

1530 };

1531

1532

1536 };

1538 return match(XhYl,

1541 };

1542

1545

1546 if (Carry->getOpcode() != Instruction::Select)

1548

1549

1550 Value *LowSum, *XhYl;

1551 if (match(Carry,

1557 return false;

1558

1559

1560 if (!CheckHiLo(XhYl, X, Y)) {

1561 if (CheckHiLo(XhYl, Y, X))

1563 else

1564 return false;

1565 }

1567 return false;

1568

1569

1573 return false;

1574

1575

1576 Value *XlYh, *XlYl;

1578 if (match(LowSum,

1586 return false;

1587

1588

1589 if (!CheckLoLo(XlYl, X, Y))

1590 return false;

1591 if (!CheckHiLo(XlYh, Y, X))

1592 return false;

1593

1594 return CreateMulHigh(X, Y);

1595 };

1596

1599

1600

1601

1602 Value *XlYh, *XhYl, *XlYl, *C2, *C3;

1603

1606 return false;

1607

1610

1623 XhYl = C3;

1624 } else {

1625

1633 return false;

1634

1635

1638 return false;

1639 }

1640

1641

1642 if (!CheckHiLo(XlYh, Y, X))

1644 if (!CheckHiLo(XlYh, Y, X))

1645 return false;

1646 if (!CheckHiLo(XhYl, X, Y))

1647 return false;

1648 if (!CheckLoLo(XlYl, X, Y))

1649 return false;

1650

1651 return CreateMulHigh(X, Y);

1652 };

1653

1656

1657

1658

1659

1660 auto ShiftAdd =

1662 if (match(A, ShiftAdd))

1664 if (match(A, ShiftAdd))

1668 return false;

1669

1670

1671 Value *XhYl, *XlYh;

1674 return false;

1675 if (!CheckHiLo(XhYl, X, Y))

1678 return false;

1680 return false;

1681

1682

1708 return false;

1709 if (!CheckLoLo(XlYl, X, Y))

1710 return false;

1711

1712 return CreateMulHigh(X, Y);

1713 };

1714

1717

1718

1719

1720 if (Carry->getOpcode() != Instruction::Select)

1722 if (Carry->getOpcode() != Instruction::Select)

1724

1725

1726 Value *CrossSum, *XhYl;

1727 if (match(Carry,

1733 return false;

1734

1738 return false;

1739

1740 Value *XlYl, *LowAccum;

1747 return false;

1748 if (!CheckLoLo(XlYl, X, Y))

1749 return false;

1750

1751 if (!CheckHiLo(XhYl, X, Y))

1753 if (!CheckHiLo(XhYl, X, Y))

1754 return false;

1759 return false;

1760

1761 return CreateMulHigh(X, Y);

1762 };

1763

1764

1765

1774 A->hasOneUse() && B->hasOneUse())

1775 if (FoldMulHighCarry(X, Y, A, B) || FoldMulHighLadder(X, Y, A, B))

1776 return true;

1777

1793 A->hasOneUse() && B->hasOneUse() && C->hasOneUse())

1794 return FoldMulHighCarry4(X, Y, A, B, C) ||

1795 FoldMulHighLadder4(X, Y, A, B, C);

1796

1797 return false;

1798}

1799

1800

1801

1802

1807 bool MadeChange = false;

1809

1811 continue;

1812

1814

1815

1816

1817

1818

1819

1830

1831

1832

1834 }

1835

1836

1838 }

1839

1840

1841 if (MadeChange)

1844

1845 return MadeChange;

1846}

1847

1848

1849

1853 bool MadeChange = false;

1856 MadeChange |= TIC.run(F);

1858 return MadeChange;

1859}

1860

1868 bool MadeCFGChange = false;

1869 if (runImpl(F, AC, TTI, TLI, DT, AA, MadeCFGChange)) {

1870

1872 }

1873

1875 if (MadeCFGChange)

1877 else

1879 return PA;

1880}

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

AMDGPU Register Bank Select

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static bool tryToRecognizePopCount(Instruction &I)

Definition AggressiveInstCombine.cpp:302

static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT)

Try to replace a mathlib call to sqrt with the LLVM intrinsic.

Definition AggressiveInstCombine.cpp:438

static bool foldAnyOrAllBitsSet(Instruction &I)

Match patterns that correspond to "any-bits-set" and "all-bits-set".

Definition AggressiveInstCombine.cpp:258

static cl::opt< unsigned > MemChrInlineThreshold("memchr-inline-threshold", cl::init(3), cl::Hidden, cl::desc("The maximum length of a constant string to " "inline a memchr call."))

static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI)

Fold smin(smax(fptosi(x), C1), C2) to llvm.fptosi.sat(x), providing C1 and C2 saturate the value of t...

Definition AggressiveInstCombine.cpp:383

static cl::opt< unsigned > StrNCmpInlineThreshold("strncmp-inline-threshold", cl::init(3), cl::Hidden, cl::desc("The maximum length of a constant string for a builtin string cmp " "call eligible for inlining. The default value is 3."))

static bool matchAndOrChain(Value *V, MaskOps &MOps)

This is a recursive helper for foldAnyOrAllBitsSet() that walks through a chain of 'and' or 'or' inst...

Definition AggressiveInstCombine.cpp:212

static bool foldMemChr(CallInst *Call, DomTreeUpdater *DTU, const DataLayout &DL)

Convert memchr with a small constant string into a switch.

Definition AggressiveInstCombine.cpp:1356

static Value * optimizeShiftInOrChain(Value *V, IRBuilder<> &Builder)

Combine away instructions providing they are still equivalent when compared against 0.

Definition AggressiveInstCombine.cpp:1014

static bool foldConsecutiveLoads(Instruction &I, const DataLayout &DL, TargetTransformInfo &TTI, AliasAnalysis &AA, const DominatorTree &DT)

Definition AggressiveInstCombine.cpp:790

static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT)

Match a pattern for a bitwise funnel/rotate operation that partially guards against undefined behavio...

Definition AggressiveInstCombine.cpp:74

static bool tryToRecognizeTableBasedCttz(Instruction &I, const DataLayout &DL)

Definition AggressiveInstCombine.cpp:544

static bool mergePartStores(SmallVectorImpl< PartStore > &Parts, const DataLayout &DL, TargetTransformInfo &TTI)

Definition AggressiveInstCombine.cpp:940

static bool mergeConsecutivePartStores(ArrayRef< PartStore > Parts, unsigned Width, const DataLayout &DL, TargetTransformInfo &TTI)

Definition AggressiveInstCombine.cpp:890

static cl::opt< unsigned > MaxInstrsToScan("aggressive-instcombine-max-scan-instrs", cl::init(64), cl::Hidden, cl::desc("Max number of instructions to scan for aggressive instcombine."))

static bool foldLoadsRecursive(Value *V, LoadOps &LOps, const DataLayout &DL, AliasAnalysis &AA)

Definition AggressiveInstCombine.cpp:645

static bool foldICmpOrChain(Instruction &I, const DataLayout &DL, TargetTransformInfo &TTI, AliasAnalysis &AA, const DominatorTree &DT)

Definition AggressiveInstCombine.cpp:1042

static bool isCTTZTable(Constant *Table, const APInt &Mul, const APInt &Shift, const APInt &AndMask, Type *AccessTy, unsigned InputBits, const APInt &GEPIdxFactor, const DataLayout &DL)

Definition AggressiveInstCombine.cpp:470

static std::optional< PartStore > matchPartStore(Instruction &I, const DataLayout &DL)

Definition AggressiveInstCombine.cpp:866

static bool foldConsecutiveStores(BasicBlock &BB, const DataLayout &DL, TargetTransformInfo &TTI, AliasAnalysis &AA)

Definition AggressiveInstCombine.cpp:973

static std::pair< APInt, APInt > getStrideAndModOffsetOfGEP(Value *PtrOp, const DataLayout &DL)

Definition AggressiveInstCombine.cpp:1071

static bool foldPatternedLoads(Instruction &I, const DataLayout &DL)

If C is a constant patterned array and all valid loaded results for given alignment are same to a con...

Definition AggressiveInstCombine.cpp:1112

static bool foldLibCalls(Instruction &I, TargetTransformInfo &TTI, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, const DataLayout &DL, bool &MadeCFGChange)

Definition AggressiveInstCombine.cpp:1434

static bool foldMulHigh(Instruction &I)

Match high part of long multiplication.

Definition AggressiveInstCombine.cpp:1507

static bool foldUnusualPatterns(Function &F, DominatorTree &DT, TargetTransformInfo &TTI, TargetLibraryInfo &TLI, AliasAnalysis &AA, AssumptionCache &AC, bool &MadeCFGChange)

This is the entry point for folds that could be implemented in regular InstCombine,...

Definition AggressiveInstCombine.cpp:1803

AggressiveInstCombiner - Combine expression patterns to form expressions with fewer,...

This is the interface for LLVM's primary stateless and local alias analysis.

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

static bool runImpl(Function &F, const TargetLowering &TLI, const LibcallLoweringInfo &Libcalls, AssumptionCache *AC)

This is the interface for a simple mod/ref and alias analysis over globals.

static MaybeAlign getAlign(Value *Ptr)

static Instruction * matchFunnelShift(Instruction &Or, InstCombinerImpl &IC)

Match UB-safe variants of the funnel shift intrinsic.

This file contains the declarations for profiling metadata utility functions.

static const MCExpr * MaskShift(const MCExpr *Val, uint32_t Mask, uint32_t Shift, MCContext &Ctx)

This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...

#define STATISTIC(VARNAME, DESC)

static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")

static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")

This pass exposes codegen information to IR-level passes.

A manager for alias analyses.

Class for arbitrary precision integers.

static APInt getAllOnes(unsigned numBits)

Return an APInt of a specified width with all bits set.

uint64_t getZExtValue() const

Get zero extended value.

void setBit(unsigned BitPosition)

Set the given bit to 1 whose position is given as "bitPosition".

unsigned getBitWidth() const

Return the number of bits in the APInt.

bool isNegative() const

Determine sign of this APInt.

static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)

Return a value containing V broadcasted over NewLen bits.

LLVM_ABI APInt srem(const APInt &RHS) const

Function for signed remainder operation.

APInt shl(unsigned shiftAmt) const

Left-shift function.

bool isSubsetOf(const APInt &RHS) const

This operation checks that all bits set in this APInt are also set in RHS.

static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)

Constructs an APInt value that has the bottom loBitsSet bits set.

bool slt(const APInt &RHS) const

Signed less than comparison.

static APInt getOneBitSet(unsigned numBits, unsigned BitNo)

Return an APInt with exactly one bit set in the result.

bool uge(const APInt &RHS) const

Unsigned greater or equal comparison.

PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)

Definition AggressiveInstCombine.cpp:1861

PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)

Get the result of an analysis pass for a given IR unit.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

const T & front() const

front - Get the first element.

size_t size() const

size - Get the array size.

A function analysis which provides an AssumptionCache.

A cache of @llvm.assume calls within a function.

LLVM Basic Block Representation.

iterator begin()

Instruction iterator methods.

LLVM_ABI const_iterator getFirstInsertionPt() const

Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI instruction.

const Function * getParent() const

Return the enclosing method, or null if none.

static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)

Creates a new BasicBlock.

const Instruction * getTerminator() const LLVM_READONLY

Returns the terminator instruction if the block is well formed or null if the block is not well formed.

This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR changes in between queries.

ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)

Represents analyses that only rely on functions' control flow.

Value * getArgOperand(unsigned i) const

This class represents a function call, abstracting a target machine's calling convention.

@ ICMP_ULT

unsigned less than

An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...

This is the shared class of boolean and integer constants.

uint64_t getZExtValue() const

Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...

This is an important base class in LLVM.

static LLVM_ABI Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

A parsed version of the target data layout string in and methods for querying it.

static LLVM_ABI DebugLoc getMergedLocations(ArrayRef< DebugLoc > Locs)

Try to combine the vector of locations passed as input in a single one.

Analysis pass which computes a DominatorTree.

static constexpr UpdateKind Insert

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

LLVM_ABI bool isReachableFromEntry(const Use &U) const

Provide an overload for a Use.

LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const

Return true if the (end of the) basic block BB dominates the use U.

void applyUpdates(ArrayRef< UpdateT > Updates)

Submit updates to all available trees.

an instruction for type-safe pointer arithmetic to access elements of arrays and structs

const Constant * getInitializer() const

getInitializer - Return the initializer for this global variable.

bool hasInitializer() const

Definitions have initializers, declarations don't.

bool isConstant() const

If the value is a global constant, its value is immutable throughout the runtime execution of the program.

static bool isEquality(Predicate P)

Return true if this predicate is either EQ or NE.

void SetCurrentDebugLocation(DebugLoc L)

Set location information used by debugging information.

PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")

SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)

Create a switch instruction with the specified value, default dest, and with a hint for the number of...

Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)

BranchInst * CreateBr(BasicBlock *Dest)

Create an unconditional 'br label X' instruction.

void SetInsertPoint(BasicBlock *TheBB)

This specifies that created instructions should be appended to the end of the specified block.

Value * CreateInBoundsPtrAdd(Value *Ptr, Value *Offset, const Twine &Name="")

IntegerType * getInt8Ty()

Fetch the type representing an 8-bit integer.

This provides a uniform API for creating instructions and inserting them into a basic block: either a...

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

LLVM_ABI void setAAMetadata(const AAMDNodes &N)

Sets the AA metadata on this instruction from the AAMDNodes structure.

LLVM_ABI InstListType::iterator eraseFromParent()

This method unlinks 'this' from the containing basic block and deletes it.

LLVM_ABI const Function * getFunction() const

Return the function this instruction belongs to.

LLVM_ABI AAMDNodes getAAMetadata() const

Returns the AA metadata for this instruction.

Class to represent integer types.

static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)

This static method is the primary way of constructing an IntegerType.

This is an important class for using LLVM in a threaded context.

An instruction for reading from memory.

unsigned getPointerAddressSpace() const

Returns the address space of the pointer operand.

Value * getPointerOperand()

static LocationSize precise(uint64_t Value)

LLVM_ABI MDNode * createUnlikelyBranchWeights()

Return metadata containing two branch weights, with significant bias towards false destination.

std::pair< KeyT, ValueT > & front()

Representation for a specific memory location.

static LLVM_ABI MemoryLocation get(const LoadInst *LI)

Return a location with information about the memory reference by the given instruction.

static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())

Return a location that may access any location before or after Ptr, while remaining within the underlying object.

void addIncoming(Value *V, BasicBlock *BB)

Add an incoming value to the end of the PHI list.

static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).

A set of analyses that are preserved following a run of a transformation pass.

static PreservedAnalyses all()

Construct a special preserved set that preserves all passes.

PreservedAnalyses & preserveSet()

Mark an analysis set as preserved.

PreservedAnalyses & preserve()

Mark an analysis as preserved.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void reserve(size_type N)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

StringRef - Represent a constant reference to a string, i.e.

static constexpr size_t npos

Analysis pass providing the TargetTransformInfo.

Analysis pass providing the TargetLibraryInfo.

Provides information about what library functions are available for the current target.

bool getLibFunc(StringRef funcName, LibFunc &F) const

Searches for a particular function name.

This pass provides access to the codegen interfaces that are needed for IR-level transformations.

@ TCK_RecipThroughput

Reciprocal throughput.

@ None

The cast is not used with a load/store of any kind.

bool run(Function &F)

Perform TruncInst pattern optimization on given function.

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY

Return the basic size of this type if it is a primitive type.

LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const

Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.

LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY

If this is a vector type, return the getPrimitiveSizeInBits value for the element type.

bool isIntegerTy() const

True if this is an instance of IntegerType.

static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

bool hasOneUse() const

Return true if there is exactly one use of this value.

LLVM_ABI void replaceAllUsesWith(Value *V)

Change all uses of this to point to a new Value.

LLVM_ABI bool hasNUsesOrMore(unsigned N) const

Return true if this value has N uses or more.

LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const

Accumulate the constant offset this value has compared to a base pointer.

LLVM_ABI LLVMContext & getContext() const

All values hold a context through their type.

LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const

Returns the number of bytes known to be dereferenceable for the pointer value.

LLVM_ABI StringRef getName() const

Return a constant reference to the value's name.

LLVM_ABI void takeName(Value *V)

Transfer the name from V to this value.

static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)

This static method is the primary way to construct an VectorType.

const ParentTy * getParent() const

Abstract Attribute helper functions.

LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)

Compute GCD of two unsigned APInt values.

@ Fast

Attempts to make calls as fast as possible (e.g.

@ C

The default llvm calling convention, compatible with C.

@ BasicBlock

Various leaf nodes.

SpecificConstantMatch m_ZeroInt()

Convenience matchers for specific integer values.

BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)

Matches a register negated by a G_SUB.

OneUse_match< SubPat > m_OneUse(const SubPat &SP)

BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)

ShiftLike_match< LHS, Instruction::LShr > m_LShrOrSelf(const LHS &L, uint64_t &R)

Matches lshr L, ConstShAmt or L itself (R will be set to zero in this case).

BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)

match_combine_or< CastInst_match< OpTy, CastInst >, OpTy > m_CastOrSelf(const OpTy &Op)

Matches any cast or self. Used to ignore casts.

ap_match< APInt > m_APInt(const APInt *&Res)

Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.

BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)

Matches an And with LHS and RHS in either order.

CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)

Matches Trunc.

specific_intval< false > m_SpecificInt(const APInt &V)

Match a specific integer value or vector with all elements equal to the value.

bool match(Val *V, const Pattern &P)

bind_ty< Instruction > m_Instruction(Instruction *&I)

Match an instruction, capturing it if we match.

specificval_ty m_Specific(const Value *V)

Match if we have a specific specified value.

cst_pred_ty< is_one > m_One()

Match an integer 1 or a vector with all elements equal to 1.

ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)

Matches SelectInst.

MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)

ShiftLike_match< LHS, Instruction::Shl > m_ShlOrSelf(const LHS &L, uint64_t &R)

Matches shl L, ConstShAmt or L itself (R will be set to zero in this case).

BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)

deferredval_ty< Value > m_Deferred(Value *const &V)

Like m_Specific(), but works if the specific value to match is determined as part of the same match()...

specific_bbval m_SpecificBB(BasicBlock *BB)

Match a specific basic block value.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)

SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)

CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)

Matches ZExt.

OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)

brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)

BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)

Matches a Add with LHS and RHS in either order.

CastInst_match< OpTy, FPToSIInst > m_FPToSI(const OpTy &Op)

MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)

class_match< Value > m_Value()

Match an arbitrary value and ignore it.

BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)

CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)

is_zero m_Zero()

Match any null constant or a vector with all elements equal to 0.

BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)

Matches an Or with LHS and RHS in either order.

BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)

Matches a Mul with LHS and RHS in either order.

BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)

match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)

Combine two pattern matchers matching L || R.

initializer< Ty > init(const Ty &Val)

NodeAddr< PhiNode * > Phi

This is an optimization pass for GlobalISel generic memory operations.

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

@ Low

Lower the current thread's priority such that it does not affect foreground tasks significantly.

FunctionAddr VTableAddr Value

LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, StringRef PassName, const Function *F=nullptr)

Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch weights in the new instruct...

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI)

LLVM_ABI bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)

This function computes the length of a null-terminated C string pointed to by V.

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)

Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.

LLVM_ABI bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)

Scan the specified basic block and try to simplify any instructions in it and recursively delete dead instructions.

LLVM_ABI void setExplicitlyUnknownBranchWeights(Instruction &I, StringRef PassName)

Specify that the branch weights for this terminator cannot be known at compile time.

LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)

Return true if 'V & Mask' is known to be zero.

LLVM_ABI bool isLibFuncEmittable(const Module *M, const TargetLibraryInfo *TLI, LibFunc TheLibFunc)

Check whether the library function is available on target and also that it in the current Module is a...

auto dyn_cast_or_null(const Y &Val)

auto reverse(ContainerTy &&C)

constexpr bool isPowerOf2_32(uint32_t Value)

Return true if the argument is a power of two > 0.

bool isModSet(const ModRefInfo MRI)

void sort(IteratorTy Start, IteratorTy End)

LLVM_ABI raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool isModOrRefSet(const ModRefInfo MRI)

LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)

Extract value of C at the given Offset reinterpreted as Ty.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

@ First

Helpers to iterate all locations in the MemoryEffectsBase class.

IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >

@ Sub

Subtraction of integers.

ArrayRef(const T &OneElt) -> ArrayRef< T >

constexpr unsigned BitWidth

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)

Split the specified block at the specified instruction.

cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))

Definition AggressiveInstCombine.cpp:47

AnalysisManager< Function > FunctionAnalysisManager

Convenience typedef for the Function analysis manager.

LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)

Returns true if V cannot be poison, but may be undef.

LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)

This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....

AAResults AliasAnalysis

Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.

LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)

Return true if we can prove that the specified FP value is either NaN or never less than -0.0.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This is used by foldLoadsRecursive() to capture a Root Load node which is of type or(load,...

Definition AggressiveInstCombine.cpp:632

Type * ZextType

Definition AggressiveInstCombine.cpp:638

uint64_t Shift

Definition AggressiveInstCombine.cpp:637

AAMDNodes AATags

Definition AggressiveInstCombine.cpp:639

LoadInst * RootInsert

Definition AggressiveInstCombine.cpp:634

LoadInst * Root

Definition AggressiveInstCombine.cpp:633

uint64_t LoadSize

Definition AggressiveInstCombine.cpp:636

bool FoundRoot

Definition AggressiveInstCombine.cpp:635

ValWidth bits starting at ValOffset of Val stored at PtrBase+PtrOffset.

Definition AggressiveInstCombine.cpp:849

uint64_t ValOffset

Definition AggressiveInstCombine.cpp:853

uint64_t ValWidth

Definition AggressiveInstCombine.cpp:854

APInt PtrOffset

Definition AggressiveInstCombine.cpp:851

StoreInst * Store

Definition AggressiveInstCombine.cpp:855

bool operator<(const PartStore &Other) const

Definition AggressiveInstCombine.cpp:861

bool isCompatibleWith(const PartStore &Other) const

Definition AggressiveInstCombine.cpp:857

Value * PtrBase

Definition AggressiveInstCombine.cpp:850

Value * Val

Definition AggressiveInstCombine.cpp:852

A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.

LLVM_ABI AAMDNodes concat(const AAMDNodes &Other) const

Determine the best AAMDNodes after concatenating two different locations together.

A MapVector that performs no allocations if smaller than a certain size.