LLVM: lib/Transforms/Scalar/GuardWidening.cpp Source File
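
// Overview (informal summary; the IR value names below are illustrative and
// not taken from this file): guard widening strengthens the condition of an
// early guard so that later, dominated guards become trivially true and can
// be removed.  For example,
//
//   %cmp0 = icmp ult i64 9, %len
//   call void (i1, ...) @llvm.experimental.guard(i1 %cmp0) [ "deopt"() ]
//   call void @unknown_side_effects()
//   %cmp1 = icmp ult i64 10, %len
//   call void (i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
//
// becomes
//
//   %cmp0 = icmp ult i64 9, %len
//   %cmp1 = icmp ult i64 10, %len
//   %wide.chk = and i1 %cmp0, %cmp1
//   call void (i1, ...) @llvm.experimental.guard(i1 %wide.chk) [ "deopt"() ]
//   call void @unknown_side_effects()
//
// The later guard's condition is then folded to 'true' and the guard is
// deleted.  The same widening is applied to branches on
// @llvm.experimental.widenable.condition, controlled by the
// guard-widening-widen-branch-guards option below.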


#include <functional>

using namespace llvm;

#define DEBUG_TYPE "guard-widening"

STATISTIC(GuardsEliminated, "Number of eliminated guards");
STATISTIC(CondBranchEliminated, "Number of eliminated conditional branches");
STATISTIC(FreezeAdded, "Number of freeze instruction introduced");

static cl::opt<bool>
    WidenBranchGuards("guard-widening-widen-branch-guards", cl::Hidden,
                      cl::desc("Whether or not we should widen guards "
                               "expressed as branches by widenable conditions"),
                      cl::init(true));

// Returns the condition checked by the guard or widenable branch I.
static Value *getCondition(Instruction *I) {
  if (IntrinsicInst *GI = dyn_cast<IntrinsicInst>(I)) {
    assert(GI->getIntrinsicID() == Intrinsic::experimental_guard &&
           "Bad guard intrinsic?");
    return GI->getArgOperand(0);
  }
  Value *Cond, *WC;
  BasicBlock *IfTrueBB, *IfFalseBB;
  if (parseWidenableBranch(I, Cond, WC, IfTrueBB, IfFalseBB))
    return Cond;

  return cast<BranchInst>(I)->getCondition();
}

// Set the condition for \p I to \p NewCond. \p I can either be a guard or a
// conditional branch.
static void setCondition(Instruction *I, Value *NewCond) {
  if (IntrinsicInst *GI = dyn_cast<IntrinsicInst>(I)) {
    assert(GI->getIntrinsicID() == Intrinsic::experimental_guard &&
           "Bad guard intrinsic?");
    GI->setArgOperand(0, NewCond);
    return;
  }
  cast<BranchInst>(I)->setCondition(NewCond);
}

// Eliminates the guard instruction properly.
static void eliminateGuard(Instruction *GuardInst, MemorySSAUpdater *MSSAU) {
  if (MSSAU)
    MSSAU->removeMemoryAccess(GuardInst);
  GuardInst->eraseFromParent();
  ++GuardsEliminated;
}

/// Find a point at which the widened condition of \p Guard should be inserted.
/// When it is represented as an intrinsic call, we can do it right before the
/// call instruction.  When we are dealing with a widenable branch, the new
/// checks are attached at the definition of the widenable condition so that a
/// loop-invariant widenable condition is not turned into a loop-variant one.
static std::optional<BasicBlock::iterator>
findInsertionPointForWideCondition(Instruction *WCOrGuard) {
  if (isGuard(WCOrGuard))
    return WCOrGuard->getIterator();
  if (auto *WC = extractWidenableCondition(WCOrGuard))
    return cast<Instruction>(WC)->getIterator();
  return std::nullopt;
}

namespace {

class GuardWideningImpl {
  DominatorTree &DT;
  PostDominatorTree *PDT;
  LoopInfo &LI;
  AssumptionCache &AC;
  MemorySSAUpdater *MSSAU;

  /// Together, these describe the region of interest.  This might be all of
  /// the blocks within a function, or only a given loop's blocks and
  /// preheader.
  DomTreeNode *Root;
  std::function<bool(BasicBlock*)> BlockFilter;

  /// The set of guards and conditional branches whose conditions have been
  /// widened into dominating guards.
  SmallVector<Instruction *, 16> EliminatedGuardsAndBranches;

  /// The set of guards which have been widened to include conditions to other
  /// guards.
  DenseSet<Instruction *> WidenedGuards;

  /// Try to eliminate instruction \p Instr by widening it into an earlier
  /// dominating guard.  \p DFSI is the DFS iterator on the dominator tree that
  /// discovered \p Instr.  \p GuardsPerBlock maps BasicBlocks to the set of
  /// guards seen in them.
  bool eliminateInstrViaWidening(
      Instruction *Instr, const df_iterator<DomTreeNode *> &DFSI,
      const DenseMap<BasicBlock *, SmallVector<Instruction *, 8>>
          &GuardsPerBlock);

  /// Used to keep track of which widening potential is more effective.
  enum WideningScore {
    /// Don't widen.
    WS_IllegalOrNegative,

    /// Widening is performance neutral as far as the cycles spent in check
    /// conditions goes (but can still help, e.g., code layout, having less
    /// deopt state).
    WS_Neutral,

    /// Widening is profitable.
    WS_Positive,

    /// Widening is very profitable.  Not significantly different from \c
    /// WS_Positive, except by the degree.
    WS_VeryPositive
  };

  static StringRef scoreTypeToString(WideningScore WS);

  /// Compute the score for widening the condition in \p DominatedInstr into
  /// \p WideningPoint.
  WideningScore computeWideningScore(Instruction *DominatedInstr,
                                     Instruction *ToWiden,
                                     BasicBlock::iterator WideningPoint,
                                     SmallVectorImpl<Value *> &ChecksToHoist,
                                     SmallVectorImpl<Value *> &ChecksToWiden);

  /// Helper to check if \p V can be hoisted to \p InsertPos.
  bool canBeHoistedTo(const Value *V, BasicBlock::iterator InsertPos) const {
    SmallPtrSet<const Instruction *, 8> Visited;
    return canBeHoistedTo(V, InsertPos, Visited);
  }

  bool canBeHoistedTo(const Value *V, BasicBlock::iterator InsertPos,
                      SmallPtrSetImpl<const Instruction *> &Visited) const;

  bool canBeHoistedTo(const SmallVectorImpl<Value *> &Checks,
                      BasicBlock::iterator InsertPos) const {
    return all_of(Checks,
                  [&](const Value *V) { return canBeHoistedTo(V, InsertPos); });
  }

  /// Helper to hoist \p V to \p InsertPos.  Guaranteed to succeed if \c
  /// canBeHoistedTo returned true.
  void makeAvailableAt(Value *V, BasicBlock::iterator InsertPos) const;

  void makeAvailableAt(const SmallVectorImpl<Value *> &Checks,
                       BasicBlock::iterator InsertPos) const {
    for (Value *V : Checks)
      makeAvailableAt(V, InsertPos);
  }

  /// Common helper used by \c widenGuard and \c isWideningCondProfitable.
  /// Try to compute an expression for the logical AND of \p ChecksToHoist and
  /// \p ChecksToWiden that is no more expensive than computing one of the two
  /// sets of checks on its own.  If \p InsertPt is provided, actually generate
  /// the resulting expression, make it available at \p InsertPt and return it;
  /// otherwise no change to the IR is made.
  std::optional<Value *>
  mergeChecks(SmallVectorImpl<Value *> &ChecksToHoist,
              SmallVectorImpl<Value *> &ChecksToWiden,
              std::optional<BasicBlock::iterator> InsertPt);

  /// Generate the logical AND of \p ChecksToHoist and \p OldCondition and
  /// make it available at \p InsertPt.
  Value *hoistChecks(SmallVectorImpl<Value *> &ChecksToHoist,
                     Value *OldCondition, BasicBlock::iterator InsertPt);

  /// Adds freeze to \p Orig and pushes it as far towards the definitions as
  /// possible.  Also replaces all uses of the frozen value with the frozen
  /// version.
  Value *freezeAndPush(Value *Orig, BasicBlock::iterator InsertPt);

  /// Represents a range check of the form \c Base + \c Offset u< \c Length,
  /// with the constraint that \c Length is not negative.  \c CheckInst is the
  /// pre-existing instruction in the IR that computes the result of this
  /// range check.
  class RangeCheck {
    const Value *Base;
    const ConstantInt *Offset;
    const Value *Length;
    ICmpInst *CheckInst;

  public:
    explicit RangeCheck(const Value *Base, const ConstantInt *Offset,
                        const Value *Length, ICmpInst *CheckInst)
        : Base(Base), Offset(Offset), Length(Length), CheckInst(CheckInst) {}

    void setBase(const Value *NewBase) { Base = NewBase; }
    void setOffset(const ConstantInt *NewOffset) { Offset = NewOffset; }

    const Value *getBase() const { return Base; }
    const ConstantInt *getOffset() const { return Offset; }
    const APInt &getOffsetValue() const { return getOffset()->getValue(); }
    const Value *getLength() const { return Length; }
    ICmpInst *getCheckInst() const { return CheckInst; }

    void print(raw_ostream &OS, bool PrintTypes = false) {
      OS << "Base: ";
      Base->printAsOperand(OS, PrintTypes);
      OS << " Offset: ";
      Offset->printAsOperand(OS, PrintTypes);
      OS << " Length: ";
      Length->printAsOperand(OS, PrintTypes);
    }

    LLVM_DUMP_METHOD void dump() const {
      print(dbgs());
      dbgs() << "\n";
    }
  };

  /// Parse \p ToParse into a conjunction (logical-and) of range checks and
  /// append them to \p Checks.  Returns true on success; may clobber
  /// \c Checks on failure.
  bool parseRangeChecks(SmallVectorImpl<Value *> &ToParse,
                        SmallVectorImpl<RangeCheck> &Checks) {
    for (auto CheckCond : ToParse) {
      if (!parseRangeChecks(CheckCond, Checks))
        return false;
    }
    return true;
  }

  bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks);

  /// Combine the checks in \p Checks into a smaller set of checks and append
  /// them into \p CombinedChecks.  Return true on success (i.e. all of the
  /// checks in \p Checks were combined into \p CombinedChecks).  Clobbers
  /// \p Checks and \p CombinedChecks on failure.
  bool combineRangeChecks(SmallVectorImpl<RangeCheck> &Checks,
                          SmallVectorImpl<RangeCheck> &CombinedChecks) const;

  /// Can we compute the logical AND of \p ChecksToHoist and \p ChecksToWiden
  /// for the price of computing only one of the two sets of expressions?
  bool isWideningCondProfitable(SmallVectorImpl<Value *> &ChecksToHoist,
                                SmallVectorImpl<Value *> &ChecksToWiden) {
    return mergeChecks(ChecksToHoist, ChecksToWiden, std::nullopt)
        .has_value();
  }

  /// Widen \p ToWiden to fail if any of \p ChecksToHoist fails (in addition
  /// to whatever it is already checking).
  void widenGuard(SmallVectorImpl<Value *> &ChecksToHoist,
                  SmallVectorImpl<Value *> &ChecksToWiden,
                  Instruction *ToWiden) {
    auto InsertPt = findInsertionPointForWideCondition(ToWiden);
    auto MergedCheck = mergeChecks(ChecksToHoist, ChecksToWiden, InsertPt);
    Value *Result = MergedCheck ? *MergedCheck
                                : hoistChecks(ChecksToHoist,
                                              getCondition(ToWiden), *InsertPt);

    if (isGuardAsWidenableBranch(ToWiden)) {
      setWidenableBranchCond(cast<BranchInst>(ToWiden), Result);
      return;
    }
    setCondition(ToWiden, Result);
  }

public:
  explicit GuardWideningImpl(DominatorTree &DT, PostDominatorTree *PDT,
                             LoopInfo &LI, AssumptionCache &AC,
                             MemorySSAUpdater *MSSAU, DomTreeNode *Root,
                             std::function<bool(BasicBlock *)> BlockFilter)
      : DT(DT), PDT(PDT), LI(LI), AC(AC), MSSAU(MSSAU), Root(Root),
        BlockFilter(BlockFilter) {}

  /// The entry point for this pass.
  bool run();
};
}

static bool isSupportedGuardInstruction(const Instruction *Insn) {
  if (isGuard(Insn))
    return true;
  if (WidenBranchGuards && isGuardAsWidenableBranch(Insn))
    return true;
  return false;
}
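
// A high-level sketch of the driver below, derived from the code rather than
// the original commentary: run() walks the dominator tree from Root in
// depth-first order, records the supported guards of each visited block in
// GuardsInBlock, and for every guard asks eliminateInstrViaWidening() to scan
// back along the current DFS path for the most profitable dominating guard or
// widenable branch to merge the checks into.  Guards whose checks were merged
// elsewhere have their condition folded to 'true' and are deleted afterwards.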

bool GuardWideningImpl::run() {
  DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> GuardsInBlock;
  bool Changed = false;
  for (auto DFI = df_begin(Root), DFE = df_end(Root);
       DFI != DFE; ++DFI) {
    auto *BB = (*DFI)->getBlock();
    if (!BlockFilter(BB))
      continue;

    auto &CurrentList = GuardsInBlock[BB];

    for (auto &I : *BB)
      if (isSupportedGuardInstruction(&I))
        CurrentList.push_back(&I);

    for (auto *II : CurrentList)
      Changed |= eliminateInstrViaWidening(II, DFI, GuardsInBlock);
  }

  assert(EliminatedGuardsAndBranches.empty() || Changed);
  for (auto *I : EliminatedGuardsAndBranches)
    if (!WidenedGuards.count(I)) {
      assert(isa<ConstantInt>(getCondition(I)) && "Should be!");
      if (isSupportedGuardInstruction(I))
        eliminateGuard(I, MSSAU);
      else {
        assert(isa<BranchInst>(I) &&
               "Eliminated something other than guard or branch?");
        ++CondBranchEliminated;
      }
    }

  return Changed;
}

bool GuardWideningImpl::eliminateInstrViaWidening(
    Instruction *Instr, const df_iterator<DomTreeNode *> &DFSI,
    const DenseMap<BasicBlock *, SmallVector<Instruction *, 8>>
        &GuardsInBlock) {
  SmallVector<Value *> ChecksToHoist;
  parseWidenableGuard(Instr, ChecksToHoist);
  // Ignore trivial true or false conditions.  These instructions will be
  // trivially eliminated by any cleanup pass.  Do not erase them here because
  // other passes may depend on their existence.
  if (ChecksToHoist.empty() ||
      (ChecksToHoist.size() == 1 && isa<ConstantInt>(ChecksToHoist.front())))
    return false;

  Instruction *BestSoFar = nullptr;
  auto BestScoreSoFar = WS_IllegalOrNegative;

  // In the set of dominating guards, find the one we can merge GuardInst with
  // for the most profit.
  for (unsigned i = 0, e = DFSI.getPathLength(); i != e; ++i) {
    auto *CurBB = DFSI.getPath(i)->getBlock();
    if (!BlockFilter(CurBB))
      break;
    assert(GuardsInBlock.count(CurBB) && "Must have been populated by now!");
    const auto &GuardsInCurBB = GuardsInBlock.find(CurBB)->second;

    auto I = GuardsInCurBB.begin();
    auto E = Instr->getParent() == CurBB ? find(GuardsInCurBB, Instr)
                                         : GuardsInCurBB.end();

#ifndef NDEBUG
    {
      unsigned Index = 0;
      for (auto &I : *CurBB) {
        if (Index == GuardsInCurBB.size())
          break;
        if (GuardsInCurBB[Index] == &I)
          Index++;
      }
      assert(Index == GuardsInCurBB.size() &&
             "Guards expected to be in order!");
    }
#endif

    assert((i == (e - 1)) == (Instr->getParent() == CurBB) && "Bad DFS?");

    for (auto *Candidate : make_range(I, E)) {
      auto WideningPoint = findInsertionPointForWideCondition(Candidate);
      if (!WideningPoint)
        continue;
      SmallVector<Value *> CandidateChecks;
      parseWidenableGuard(Candidate, CandidateChecks);
      auto Score = computeWideningScore(Instr, Candidate, *WideningPoint,
                                        ChecksToHoist, CandidateChecks);
      LLVM_DEBUG(dbgs() << "Score between " << *Instr << " and " << *Candidate
                        << " is " << scoreTypeToString(Score) << "\n");
      if (Score > BestScoreSoFar) {
        BestScoreSoFar = Score;
        BestSoFar = Candidate;
      }
    }
  }

  if (BestScoreSoFar == WS_IllegalOrNegative) {
    LLVM_DEBUG(dbgs() << "Did not eliminate guard " << *Instr << "\n");
    return false;
  }

  assert(BestSoFar != Instr && "Should have never visited same guard!");

  LLVM_DEBUG(dbgs() << "Widening " << *Instr << " into " << *BestSoFar
                    << " with score " << scoreTypeToString(BestScoreSoFar)
                    << "\n");
  SmallVector<Value *> ChecksToWiden;
  parseWidenableGuard(BestSoFar, ChecksToWiden);
  widenGuard(ChecksToHoist, ChecksToWiden, BestSoFar);
  auto NewGuardCondition = ConstantInt::getTrue(Instr->getContext());
  setCondition(Instr, NewGuardCondition);
  EliminatedGuardsAndBranches.push_back(Instr);
  WidenedGuards.insert(BestSoFar);
  return true;
}

GuardWideningImpl::WideningScore GuardWideningImpl::computeWideningScore(
    Instruction *DominatedInstr, Instruction *ToWiden,
    BasicBlock::iterator WideningPoint, SmallVectorImpl<Value *> &ChecksToHoist,
    SmallVectorImpl<Value *> &ChecksToWiden) {
  Loop *DominatedInstrLoop = LI.getLoopFor(DominatedInstr->getParent());
  Loop *DominatingGuardLoop = LI.getLoopFor(WideningPoint->getParent());
  bool HoistingOutOfLoop = false;

  if (DominatingGuardLoop != DominatedInstrLoop) {
    // The widening point must be in a loop that contains the dominated
    // instruction's loop (or in no loop at all); otherwise give up.
    if (DominatingGuardLoop &&
        !DominatingGuardLoop->contains(DominatedInstrLoop))
      return WS_IllegalOrNegative;

    HoistingOutOfLoop = true;
  }

  if (!canBeHoistedTo(ChecksToHoist, WideningPoint))
    return WS_IllegalOrNegative;

  // The widened condition is combined with the existing condition of ToWiden,
  // so that condition must also be available at the widening point.
  if (!canBeHoistedTo(getCondition(ToWiden), WideningPoint))
    return WS_IllegalOrNegative;

  // If the guard was conditionally executed, it may never be reached
  // dynamically.  There are two potential downsides to hoisting it out of the
  // conditionally executed region: 1) we may spuriously deopt without need,
  // and 2) we have the extra cost of computing the guard condition in the
  // common case.  At the moment, we really only consider the second in our
  // heuristic here.
  if (isWideningCondProfitable(ChecksToHoist, ChecksToWiden))
    return HoistingOutOfLoop ? WS_VeryPositive : WS_Positive;

  if (HoistingOutOfLoop)
    return WS_Positive;

  // For a given basic block BB, return the successor which is guaranteed or
  // highly likely to be taken.
  auto GetLikelySuccessor = [](const BasicBlock * BB)->const BasicBlock * {
    if (auto *UniqueSucc = BB->getUniqueSuccessor())
      return UniqueSucc;
    auto *Term = BB->getTerminator();
    Value *Cond = nullptr;
    const BasicBlock *IfTrue = nullptr, *IfFalse = nullptr;
    using namespace PatternMatch;
    if (!match(Term, m_Br(m_Value(Cond), m_BasicBlock(IfTrue),
                          m_BasicBlock(IfFalse))))
      return nullptr;

    // For constant conditions, only one dynamic successor is possible.
    if (auto *ConstCond = dyn_cast<ConstantInt>(Cond))
      return ConstCond->isAllOnesValue() ? IfTrue : IfFalse;

    // If one of the successors ends with deopt, the other one is likely.
    if (IfFalse->getPostdominatingDeoptimizeCall())
      return IfTrue;
    if (IfTrue->getPostdominatingDeoptimizeCall())
      return IfFalse;

    // TODO: Use branch frequency metadata to allow hoisting through non-deopt
    // branches?
    return nullptr;
  };

  // Returns true if we might be hoisting above explicit control flow into a
  // considerably hotter block.  Note that this completely ignores implicit
  // control flow (guards, calls which throw, etc.); that choice assumes that
  // implicit control flow exits are all rare.
  auto MaybeHoistingToHotterBlock = [&]() {
    const auto *DominatingBlock = WideningPoint->getParent();
    const auto *DominatedBlock = DominatedInstr->getParent();

    // Descend as far as we can, always taking the likely successor.
    assert(DT.dominates(DominatingBlock, DominatedBlock) && "No dominance");
    while (DominatedBlock != DominatingBlock) {
      auto *LikelySucc = GetLikelySuccessor(DominatingBlock);
      // No likely successor?
      if (!LikelySucc)
        break;
      // Only go down the dominator tree, otherwise we can end up in a cycle.
      if (!DT.properlyDominates(DominatingBlock, LikelySucc))
        break;
      DominatingBlock = LikelySucc;
    }

    // Found a likely path from the widening point down to the dominated block?
    if (DominatedBlock == DominatingBlock)
      return false;

    // We stopped somewhere else; conservatively assume the dominated block is
    // colder than the widening point.
    if (!DT.dominates(DominatingBlock, DominatedBlock))
      return true;

    // TODO: diamond, triangle cases
    if (!PDT)
      return true;
    return !PDT->dominates(DominatedBlock, DominatingBlock);
  };

  return MaybeHoistingToHotterBlock() ? WS_IllegalOrNegative : WS_Neutral;
}
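
// Illustration of the MaybeHoistingToHotterBlock check above (hypothetical
// CFG, not taken from this file): if the dominated guard lives in a block
// that is only reached on one side of a real conditional branch, hoisting its
// checks to the dominating widening point would make them execute
// unconditionally, i.e. on a potentially much hotter path.  Unless the
// likely-successor walk shows that control flow almost always reaches the
// guard anyway (for example because the other successors end in deoptimize
// calls), the widening is scored WS_IllegalOrNegative and skipped.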

bool GuardWideningImpl::canBeHoistedTo(
    const Value *V, BasicBlock::iterator Loc,
    SmallPtrSetImpl<const Instruction *> &Visited) const {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc) || Visited.count(Inst))
    return true;

  if (!isSafeToSpeculativelyExecute(Inst, &*Loc, &AC, &DT) ||
      Inst->mayReadFromMemory())
    return false;

  Visited.insert(Inst);

  assert(!isa<PHINode>(Inst) &&
         "PHIs should return false for isSafeToSpeculativelyExecute");
  assert(DT.isReachableFromEntry(Inst->getParent()) &&
         "We did a DFS from the block entry!");
  return all_of(Inst->operands(),
                [&](Value *Op) { return canBeHoistedTo(Op, Loc, Visited); });
}

void GuardWideningImpl::makeAvailableAt(Value *V,
                                        BasicBlock::iterator Loc) const {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc))
    return;

  assert(isSafeToSpeculativelyExecute(Inst, &*Loc, &AC, &DT) &&
         !Inst->mayReadFromMemory() &&
         "Should've checked with canBeHoistedTo!");

  for (Value *Op : Inst->operands())
    makeAvailableAt(Op, Loc);

  Inst->moveBefore(*Loc->getParent(), Loc);
}

// Return an insertion point before which we can freeze the value V, as close
// to the definition of V as possible.  If there is no such place, return
// std::nullopt.
static std::optional<BasicBlock::iterator>
getFreezeInsertPt(Value *V, const DominatorTree &DT) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return DT.getRoot()->getFirstNonPHIOrDbgOrAlloca()->getIterator();

  std::optional<BasicBlock::iterator> Res = I->getInsertionPointAfterDef();
  // If there is no place to add a freeze - return nullopt.
  if (!Res || !DT.dominates(I, &**Res))
    return std::nullopt;

  Instruction *ResInst = &**Res;

  // If there is a user dominated by the original I, then it should be
  // dominated by the freeze instruction as well.
  if (any_of(I->users(), [&](User *U) {
        Instruction *User = cast<Instruction>(U);
        return ResInst != User && DT.dominates(I, User) &&
               !DT.dominates(ResInst, User);
      }))
    return std::nullopt;
  return Res;
}
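
// Why the freeze below is needed (informal rationale): widening moves a check
// from a place where it was conditionally executed to a place where it is
// evaluated unconditionally and then branched or deoptimized on.  If the
// hoisted expression could be poison or undef, branching on it would be
// undefined behavior, so freezeAndPush() freezes the widened condition.  To
// keep the freeze cheap it is pushed towards the definitions: values that
// cannot produce poison are left alone, poison-generating flags are dropped,
// and only the values that can actually introduce poison receive an explicit
// freeze instruction.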

Value *GuardWideningImpl::freezeAndPush(Value *Orig,
                                        BasicBlock::iterator InsertPt) {
  if (isGuaranteedNotToBePoison(Orig, nullptr, &*InsertPt, &DT))
    return Orig;
  std::optional<BasicBlock::iterator> InsertPtAtDef =
      getFreezeInsertPt(Orig, DT);
  if (!InsertPtAtDef) {
    FreezeInst *FI = new FreezeInst(Orig, "gw.freeze");
    FI->insertBefore(*InsertPt->getParent(), InsertPt);
    return FI;
  }
  if (isa<Constant>(Orig) || isa<GlobalValue>(Orig)) {
    BasicBlock::iterator InsertPt = *InsertPtAtDef;
    FreezeInst *FI = new FreezeInst(Orig, "gw.freeze");
    FI->insertBefore(*InsertPt->getParent(), InsertPt);
    return FI;
  }

  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Instruction *, 16> DropPoisonFlags;
  SmallVector<Value *, 16> NeedFreeze;
  DenseMap<Value *, FreezeInst *> CacheOfFreezes;

  // Visited marks values we have already processed.  For constants and
  // globals, CacheOfFreezes caches the freeze instruction that should replace
  // their uses (if a freeze is required at all).
  auto handleConstantOrGlobal = [&](Use &U) {
    Value *Def = U.get();
    if (!isa<Constant>(Def) && !isa<GlobalValue>(Def))
      return false;

    if (Visited.insert(Def).second) {
      if (isGuaranteedNotToBePoison(Def, nullptr, &*InsertPt, &DT))
        return true;
      BasicBlock::iterator InsertPt = *getFreezeInsertPt(Def, DT);
      FreezeInst *FI = new FreezeInst(Def, Def->getName() + ".gw.fr");
      FI->insertBefore(*InsertPt->getParent(), InsertPt);
      CacheOfFreezes[Def] = FI;
    }

    if (auto It = CacheOfFreezes.find(Def); It != CacheOfFreezes.end())
      U.set(It->second);
    return true;
  };

  Worklist.push_back(Orig);
  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    if (isGuaranteedNotToBePoison(V, nullptr, &*InsertPt, &DT))
      continue;

    Instruction *I = dyn_cast<Instruction>(V);
    if (!I || canCreateUndefOrPoison(cast<Operator>(I),
                                     /*ConsiderFlagsAndMetadata*/ false)) {
      NeedFreeze.push_back(V);
      continue;
    }

    // Any operand that we cannot freeze at its own definition forces us to
    // freeze this value itself instead of pushing the freeze further.
    if (any_of(I->operands(), [&](Value *Op) {
          return isa<Constant>(Op) && !getFreezeInsertPt(Op, DT);
        })) {
      NeedFreeze.push_back(I);
      continue;
    }
    DropPoisonFlags.insert(I);
    for (Use &U : I->operands())
      if (!handleConstantOrGlobal(U))
        Worklist.push_back(U.get());
  }
  for (Instruction *I : DropPoisonFlags)
    I->dropPoisonGeneratingAnnotations();

  Value *Result = Orig;
  for (Value *V : NeedFreeze) {
    BasicBlock::iterator FreezeInsertPt = *getFreezeInsertPt(V, DT);
    FreezeInst *FI = new FreezeInst(V, V->getName() + ".gw.fr");
    FI->insertBefore(*FreezeInsertPt->getParent(), FreezeInsertPt);
    ++FreezeAdded;
    if (V == Orig)
      Result = FI;
    V->replaceUsesWithIf(
        FI, [&](const Use & U)->bool { return U.getUser() != FI; });
  }

  return Result;
}
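
// Worked example for mergeChecks() below (value names are illustrative): with
// ChecksToWiden = { %x u< 10 } and ChecksToHoist = { %x u< 5 }, the two
// predicates are turned into constant ranges via
// ConstantRange::makeExactICmpRegion ([0, 10) and [0, 5)), their exact
// intersection [0, 5) is converted back with getEquivalentICmp, and the
// merged condition becomes the single compare "icmp ult %x, 5" named
// "wide.chk".  When no InsertPt is supplied the routine only reports whether
// such a merge exists, which is what isWideningCondProfitable() uses.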

std::optional<Value *>
GuardWideningImpl::mergeChecks(SmallVectorImpl<Value *> &ChecksToHoist,
                               SmallVectorImpl<Value *> &ChecksToWiden,
                               std::optional<BasicBlock::iterator> InsertPt) {
  using namespace llvm::PatternMatch;

  Value *Result = nullptr;
  {
    // L >u C0 && L >u C1  ->  L >u max(C0, C1)
    ConstantInt *RHS0, *RHS1;
    Value *LHS;
    CmpPredicate Pred0, Pred1;

    // Two compares against the same value with constant right-hand sides can
    // be merged by intersecting their ranges.
    if (ChecksToWiden.size() == 1 && ChecksToHoist.size() == 1 &&
        match(ChecksToWiden.front(),
              m_ICmp(Pred0, m_Value(LHS), m_ConstantInt(RHS0))) &&
        match(ChecksToHoist.front(),
              m_ICmp(Pred1, m_Specific(LHS), m_ConstantInt(RHS1)))) {

      ConstantRange CR0 =
          ConstantRange::makeExactICmpRegion(Pred0, RHS0->getValue());
      ConstantRange CR1 =
          ConstantRange::makeExactICmpRegion(Pred1, RHS1->getValue());

      // Given what we're doing here and the semantics of guards, it would
      // be correct to use a subset intersection, but that may be too
      // aggressive in cases we care about.
      if (std::optional<ConstantRange> Intersect =
              CR0.exactIntersectWith(CR1)) {
        APInt NewRHSAP;
        CmpInst::Predicate Pred;
        if (Intersect->getEquivalentICmp(Pred, NewRHSAP)) {
          if (InsertPt) {
            ConstantInt *NewRHS =
                ConstantInt::get((*InsertPt)->getContext(), NewRHSAP);
            assert(canBeHoistedTo(LHS, *InsertPt) && "must be");
            makeAvailableAt(LHS, *InsertPt);
            Result = new ICmpInst(*InsertPt, Pred, LHS, NewRHS, "wide.chk");
          }
          return Result;
        }
      }
    }
  }

  {
    SmallVector<GuardWideningImpl::RangeCheck, 4> Checks, CombinedChecks;
    if (parseRangeChecks(ChecksToWiden, Checks) &&
        parseRangeChecks(ChecksToHoist, Checks) &&
        combineRangeChecks(Checks, CombinedChecks)) {
      if (InsertPt) {
        for (auto &RC : CombinedChecks) {
          makeAvailableAt(RC.getCheckInst(), *InsertPt);
          if (Result)
            Result = BinaryOperator::CreateAnd(RC.getCheckInst(), Result, "",
                                               *InsertPt);
          else
            Result = RC.getCheckInst();
        }
        assert(Result && "Failed to find result value");
        Result->setName("wide.chk");
        Result = freezeAndPush(Result, *InsertPt);
      }
      return Result;
    }
  }

  // We were not able to compute ChecksToHoist AND ChecksToWiden for the price
  // of one.
  return std::nullopt;
}

Value *GuardWideningImpl::hoistChecks(SmallVectorImpl<Value *> &ChecksToHoist,
                                      Value *OldCondition,
                                      BasicBlock::iterator InsertPt) {
  assert(!ChecksToHoist.empty());
  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
  makeAvailableAt(ChecksToHoist, InsertPt);
  makeAvailableAt(OldCondition, InsertPt);
  Value *Result = Builder.CreateAnd(ChecksToHoist);
  Result = freezeAndPush(Result, InsertPt);
  Result = Builder.CreateAnd(OldCondition, Result);
  Result->setName("wide.chk");
  return Result;
}
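
// Worked example for parseRangeChecks() below (values are illustrative): the
// check "icmp ult (add i64 %i, 4), %len" is recorded as a RangeCheck with
// Base = %i, Offset = 4 and Length = %len, provided %len can be proven
// non-negative; a plain "icmp ult %i, %len" parses with Offset = 0, and ugt
// compares are handled by swapping the operands first.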

bool GuardWideningImpl::parseRangeChecks(
    Value *CheckCond, SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks) {
  using namespace llvm::PatternMatch;

  auto *IC = dyn_cast<ICmpInst>(CheckCond);
  if (!IC || !IC->getOperand(0)->getType()->isIntegerTy() ||
      (IC->getPredicate() != ICmpInst::ICMP_ULT &&
       IC->getPredicate() != ICmpInst::ICMP_UGT))
    return false;

  const Value *CmpLHS = IC->getOperand(0), *CmpRHS = IC->getOperand(1);
  if (IC->getPredicate() == ICmpInst::ICMP_UGT)
    std::swap(CmpLHS, CmpRHS);

  auto &DL = IC->getDataLayout();

  GuardWideningImpl::RangeCheck Check(
      CmpLHS, cast<ConstantInt>(ConstantInt::getNullValue(CmpRHS->getType())),
      CmpRHS, IC);

  if (!isKnownNonNegative(Check.getLength(), DL))
    return false;

  // Fold additions with a constant operand on the base into the offset, so
  // that e.g. (A + 5) u< L and (A + 7) u< L are recorded against the same
  // base A and can later be combined.
  auto &Ctx = CheckCond->getContext();
  bool Changed;
  do {
    Changed = false;
    Value *OpLHS;
    ConstantInt *OpRHS;

#ifndef NDEBUG
    auto *BaseInst = dyn_cast<Instruction>(Check.getBase());
    assert((!BaseInst || DT.isReachableFromEntry(BaseInst->getParent())) &&
           "Unreachable instruction?");
#endif

    // The stored base is const-qualified; match on a non-const view so the
    // bound operands can be written back through setBase/setOffset.
    auto *Base = const_cast<Value *>(Check.getBase());
    if (match(Base, m_Add(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
      Check.setBase(OpLHS);
      APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
      Check.setOffset(ConstantInt::get(Ctx, NewOffset));
      Changed = true;
    } else if (match(Base, m_Or(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
      // An 'or' with known-zero bits in the variable operand behaves like an
      // add, so peel it the same way.
      KnownBits Known = computeKnownBits(OpLHS, DL);
      if ((OpRHS->getValue() & Known.Zero) == OpRHS->getValue()) {
        Check.setBase(OpLHS);
        APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
        Check.setOffset(ConstantInt::get(Ctx, NewOffset));
        Changed = true;
      }
    }
  } while (Changed);

  Checks.push_back(Check);
  return true;
}
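
// Worked example for combineRangeChecks() below (offsets are illustrative):
// given three parsed checks on the same Base and Length, say
//   %i + 1 u< %len,  %i + 2 u< %len,  %i + 7 u< %len,
// the routine sorts them by offset and, when the offsets are close enough
// that the distance between the smallest and largest cannot wrap, keeps only
// the two extreme checks (%i + 1 u< %len and %i + 7 u< %len); together they
// imply every check in between.  Groups of fewer than three checks are passed
// through unchanged.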

bool GuardWideningImpl::combineRangeChecks(
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) const {
  unsigned OldCount = Checks.size();
  while (!Checks.empty()) {
    // Pick all of the range checks with a specific base and length, and try
    // to merge them.
    const Value *CurrentBase = Checks.front().getBase();
    const Value *CurrentLength = Checks.front().getLength();

    SmallVector<GuardWideningImpl::RangeCheck, 3> CurrentChecks;

    auto IsCurrentCheck = [&](GuardWideningImpl::RangeCheck &RC) {
      return RC.getBase() == CurrentBase && RC.getLength() == CurrentLength;
    };

    copy_if(Checks, std::back_inserter(CurrentChecks), IsCurrentCheck);
    erase_if(Checks, IsCurrentCheck);

    assert(CurrentChecks.size() != 0 && "We know we have at least one!");

    if (CurrentChecks.size() < 3) {
      llvm::append_range(RangeChecksOut, CurrentChecks);
      continue;
    }

    // CurrentChecks.size() will typically be 3 here, but so far there has
    // been no need to hard-code that fact.

    llvm::sort(CurrentChecks, [&](const GuardWideningImpl::RangeCheck &LHS,
                                  const GuardWideningImpl::RangeCheck &RHS) {
      return LHS.getOffsetValue().slt(RHS.getOffsetValue());
    });

    const ConstantInt *MinOffset = CurrentChecks.front().getOffset();
    const ConstantInt *MaxOffset = CurrentChecks.back().getOffset();

    unsigned BitWidth = MaxOffset->getValue().getBitWidth();
    if ((MaxOffset->getValue() - MinOffset->getValue())
            .ugt(APInt::getSignedMinValue(BitWidth)))
      return false;

    APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
    const APInt &HighOffset = MaxOffset->getValue();
    auto OffsetOK = [&](const GuardWideningImpl::RangeCheck &RC) {
      return (HighOffset - RC.getOffsetValue()).ult(MaxDiff);
    };

    if (MaxDiff.isMinValue() || !all_of(drop_begin(CurrentChecks), OffsetOK))
      return false;

    // We have a series of f+1 checks as:
    //
    //   I+k_0 u< L   ... Chk_0
    //   I+k_1 u< L   ... Chk_1
    //   ...
    //   I+k_f u< L   ... Chk_f
    //
    //   with k_f-k_i u< k_f for each i in [0, f)  ... Precond_i
    //
    // Claim: Chk_0 AND Chk_f implies all the other checks.
    //
    // Informal proof sketch: the preconditions (checked via OffsetOK above)
    // ensure that the sequence I+k_0, ..., I+k_f increases without wrapping
    // around the unsigned boundary, so if the first and the last value are
    // both u< L, every value in between is too.

    RangeChecksOut.emplace_back(CurrentChecks.front());
    RangeChecksOut.emplace_back(CurrentChecks.back());
  }

  assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
  return RangeChecksOut.size() != OldCount;
}

#ifndef NDEBUG
StringRef GuardWideningImpl::scoreTypeToString(WideningScore WS) {
  switch (WS) {
  case WS_IllegalOrNegative:
    return "IllegalOrNegative";
  case WS_Neutral:
    return "Neutral";
  case WS_Positive:
    return "Positive";
  case WS_VeryPositive:
    return "VeryPositive";
  }

  llvm_unreachable("Fully covered switch above!");
}
#endif

PreservedAnalyses GuardWideningPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  // Bail out early when the function uses neither guards nor widenable
  // conditions, so that we do not request the analyses below for nothing.
  auto *GuardDecl = Intrinsic::getDeclarationIfExists(
      F.getParent(), Intrinsic::experimental_guard);
  bool HasIntrinsicGuards = GuardDecl && !GuardDecl->use_empty();
  auto *WCDecl = Intrinsic::getDeclarationIfExists(
      F.getParent(), Intrinsic::experimental_widenable_condition);
  bool HasWidenableConditions = WCDecl && !WCDecl->use_empty();
  if (!HasIntrinsicGuards && !HasWidenableConditions)
    return PreservedAnalyses::all();

  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSAA = AM.getCachedResult<MemorySSAAnalysis>(F);
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSAA)
    MSSAU = std::make_unique<MemorySSAUpdater>(&MSSAA->getMSSA());
  if (!GuardWideningImpl(DT, &PDT, LI, AC, MSSAU ? MSSAU.get() : nullptr,
                         DT.getRootNode(), [](BasicBlock *) { return true; })
           .run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<MemorySSAAnalysis>();
  return PA;
}

PreservedAnalyses GuardWideningPass::run(Loop &L, LoopAnalysisManager &AM,
                                         LoopStandardAnalysisResults &AR,
                                         LPMUpdater &U) {
  BasicBlock *RootBB = L.getLoopPredecessor();
  if (!RootBB)
    RootBB = L.getHeader();
  auto BlockFilter = [&](BasicBlock *BB) {
    return BB == RootBB || L.contains(BB);
  };
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (AR.MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(AR.MSSA);
  if (!GuardWideningImpl(AR.DT, nullptr, AR.LI, AR.AC,
                         MSSAU ? MSSAU.get() : nullptr, AR.DT.getNode(RootBB),
                         BlockFilter)
           .run())
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}
