LLVM: lib/Transforms/Utils/InlineFunction.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

75#include

76#include

77#include

78#include

79#include

80#include

81#include

82#include

83#include

84#include

85

86#define DEBUG_TYPE "inline-function"

87

88using namespace llvm;

91

95 cl::desc("Convert noalias attributes to metadata during inlining."));

96

100 cl::desc("Use the llvm.experimental.noalias.scope.decl "

101 "intrinsic during inlining."));

102

103

104

105

109 cl::desc("Convert align attributes to assumptions during inlining."));

110

112 "max-inst-checked-for-throw-during-inlining", cl::Hidden,

113 cl::desc("the maximum number of instructions analyzed for may throw during "

114 "attribute inference in inlined body"),

116

117namespace {

118

119

120 class LandingPadInliningInfo {

121

123

124

125 BasicBlock *InnerResumeDest = nullptr;

126

127

129

130

131 PHINode *InnerEHValuesPHI = nullptr;

132

134

135 public:

137 : OuterResumeDest(II->getUnwindDest()) {

138

139

140

143 for (; isa(I); ++I) {

144

146 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));

147 }

148

149 CallerLPad = cast(I);

150 }

151

152

153

154 BasicBlock *getOuterResumeDest() const {

155 return OuterResumeDest;

156 }

157

159

160 LandingPadInst *getLandingPadInst() const { return CallerLPad; }

161

162

163

164

165

166

169

170

171

172 void addIncomingPHIValuesFor(BasicBlock *BB) const {

173 addIncomingPHIValuesForInto(BB, OuterResumeDest);

174 }

175

178 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {

180 phi->addIncoming(UnwindDestPHIValues[i], src);

181 }

182 }

183 };

184}

185

188 while (It != BB.end()) {

189 if (auto *IntrinsicCall = dyn_cast(It)) {

190 if (IntrinsicCall->isEntry()) {

191 return IntrinsicCall;

192 }

193 }

194 It = std::next(It);

195 }

196 return nullptr;

197}

198

199

200BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {

201 if (InnerResumeDest) return InnerResumeDest;

202

203

205 InnerResumeDest =

207 OuterResumeDest->getName() + ".body");

208

209

210 const unsigned PHICapacity = 2;

211

212

215 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {

216 PHINode *OuterPHI = cast(I);

218 OuterPHI->getName() + ".lpad-body");

221 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);

222 }

223

224

225 InnerEHValuesPHI =

229 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

230

231

232 return InnerResumeDest;

233}

234

235

236

237

238

239void LandingPadInliningInfo::forwardResume(

241 BasicBlock *Dest = getInnerResumeDest();

243

245

246

247

248 addIncomingPHIValuesForInto(Src, Dest);

249

252}

253

254

256 if (auto *FPI = dyn_cast(EHPad))

257 return FPI->getParentPad();

258 return cast(EHPad)->getParentPad();

259}

260

262

263

264

268

269 while (!Worklist.empty()) {

271

272

273

274

276 Value *UnwindDestToken = nullptr;

277 if (auto *CatchSwitch = dyn_cast(CurrentPad)) {

278 if (CatchSwitch->hasUnwindDest()) {

279 UnwindDestToken = &*CatchSwitch->getUnwindDest()->getFirstNonPHIIt();

280 } else {

281

282

283

284

285

286

287 for (auto HI = CatchSwitch->handler_begin(),

288 HE = CatchSwitch->handler_end();

289 HI != HE && !UnwindDestToken; ++HI) {

291 auto *CatchPad =

293 for (User *Child : CatchPad->users()) {

294

295

296

297

298 if (!isa(Child) && !isa(Child))

299 continue;

300

301 Instruction *ChildPad = cast(Child);

302 auto Memo = MemoMap.find(ChildPad);

303 if (Memo == MemoMap.end()) {

304

306 continue;

307 }

308

309

310 Value *ChildUnwindDestToken = Memo->second;

311 if (!ChildUnwindDestToken)

312 continue;

313

314

315

316

317 if (isa(ChildUnwindDestToken)) {

318 UnwindDestToken = ChildUnwindDestToken;

319 break;

320 }

322 }

323 }

324 }

325 } else {

326 auto *CleanupPad = cast(CurrentPad);

327 for (User *U : CleanupPad->users()) {

328 if (auto *CleanupRet = dyn_cast(U)) {

329 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())

330 UnwindDestToken = &*RetUnwindDest->getFirstNonPHIIt();

331 else

333 break;

334 }

335 Value *ChildUnwindDestToken;

336 if (auto *Invoke = dyn_cast(U)) {

337 ChildUnwindDestToken = &*Invoke->getUnwindDest()->getFirstNonPHIIt();

338 } else if (isa(U) || isa(U)) {

339 Instruction *ChildPad = cast(U);

340 auto Memo = MemoMap.find(ChildPad);

341 if (Memo == MemoMap.end()) {

342

344 continue;

345 }

346

347

348 ChildUnwindDestToken = Memo->second;

349 if (!ChildUnwindDestToken)

350 continue;

351 } else {

352

353 continue;

354 }

355

356

357

358 if (isa(ChildUnwindDestToken) &&

359 getParentPad(ChildUnwindDestToken) == CleanupPad)

360 continue;

361 UnwindDestToken = ChildUnwindDestToken;

362 break;

363 }

364 }

365

366

367 if (!UnwindDestToken)

368 continue;

369

370

371

372

373

374 Value *UnwindParent;

375 if (auto *UnwindPad = dyn_cast(UnwindDestToken))

377 else

378 UnwindParent = nullptr;

379 bool ExitedOriginalPad = false;

380 for (Instruction *ExitedPad = CurrentPad;

381 ExitedPad && ExitedPad != UnwindParent;

382 ExitedPad = dyn_cast(getParentPad(ExitedPad))) {

383

384 if (isa(ExitedPad))

385 continue;

386 MemoMap[ExitedPad] = UnwindDestToken;

387 ExitedOriginalPad |= (ExitedPad == EHPad);

388 }

389

390 if (ExitedOriginalPad)

391 return UnwindDestToken;

392

393

394 }

395

396

397 return nullptr;

398}

399

400

401

402

403

404

405

406

407

408

409

410

411

412

413

414

415

416

419

420

421

422 if (auto *CPI = dyn_cast(EHPad))

423 EHPad = CPI->getCatchSwitch();

424

425

426 auto Memo = MemoMap.find(EHPad);

427 if (Memo != MemoMap.end())

428 return Memo->second;

429

430

432 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));

433 if (UnwindDestToken)

434 return UnwindDestToken;

435

436

437

438

439

440

441 MemoMap[EHPad] = nullptr;

442#ifndef NDEBUG

444 TempMemos.insert(EHPad);

445#endif

447 Value *AncestorToken;

449 auto *AncestorPad = dyn_cast(AncestorToken);

450 AncestorToken = getParentPad(AncestorToken)) {

451

452 if (isa(AncestorPad))

453 continue;

454

455

456

457

458

459

460

461 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);

462 auto AncestorMemo = MemoMap.find(AncestorPad);

463 if (AncestorMemo == MemoMap.end()) {

465 } else {

466 UnwindDestToken = AncestorMemo->second;

467 }

468 if (UnwindDestToken)

469 break;

470 LastUselessPad = AncestorPad;

471 MemoMap[LastUselessPad] = nullptr;

472#ifndef NDEBUG

473 TempMemos.insert(LastUselessPad);

474#endif

475 }

476

477

478

479

480

481

482

483

484

485

486

487

488

489

491 while (!Worklist.empty()) {

493 auto Memo = MemoMap.find(UselessPad);

494 if (Memo != MemoMap.end() && Memo->second) {

495

496

497

498

499

500

501

503 continue;

504 }

505

506

507

508

509

510

511

512

513

514 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));

515

516

517

518

519

520

521

522 MemoMap[UselessPad] = UnwindDestToken;

523 if (auto *CatchSwitch = dyn_cast(UselessPad)) {

524 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");

525 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {

526 auto *CatchPad = &*HandlerBlock->getFirstNonPHIIt();

527 for (User *U : CatchPad->users()) {

528 assert((!isa(U) ||

530 ->getUnwindDest()

531 ->getFirstNonPHIIt()) == CatchPad)) &&

532 "Expected useless pad");

533 if (isa(U) || isa(U))

534 Worklist.push_back(cast(U));

535 }

536 }

537 } else {

538 assert(isa(UselessPad));

539 for (User *U : UselessPad->users()) {

540 assert(!isa(U) && "Expected useless pad");

542 (!isa(U) ||

544 &*cast(U)->getUnwindDest()->getFirstNonPHIIt()) ==

545 UselessPad)) &&

546 "Expected useless pad");

547 if (isa(U) || isa(U))

548 Worklist.push_back(cast(U));

549 }

550 }

551 }

552

553 return UnwindDestToken;

554}

555

556

557

558

559

560

565

566

567 CallInst *CI = dyn_cast(&I);

568

570 continue;

571

572

573

574

575

576

577

579 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||

580 F->getIntrinsicID() == Intrinsic::experimental_guard)

581 continue;

582

584

585

586

587

588

589

590

591 auto *FuncletPad = cast(FuncletBundle->Inputs[0]);

592 Value *UnwindDestToken =

594 if (UnwindDestToken && !isa(UnwindDestToken))

595 continue;

596#ifndef NDEBUG

598 if (auto *CatchPad = dyn_cast(FuncletPad))

599 MemoKey = CatchPad->getCatchSwitch();

600 else

601 MemoKey = FuncletPad;

602 assert(FuncletUnwindMap->count(MemoKey) &&

603 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&

604 "must get memoized to avoid confusing later searches");

605#endif

606 }

607

609 return BB;

610 }

611 return nullptr;

612}

613

614

615

616

617

618

619

622 BasicBlock *InvokeDest = II->getUnwindDest();

623

625

626

627

628

629 LandingPadInliningInfo Invoke(II);

630

631

634 I != E; ++I)

635 if (InvokeInst *II = dyn_cast(I->getTerminator()))

636 InlinedLPads.insert(II->getLandingPadInst());

637

638

639

640 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();

643 InlinedLPad->reserveClauses(OuterNum);

644 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)

645 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));

647 InlinedLPad->setCleanup(true);

648 }

649

651 BB != E; ++BB) {

654 &*BB, Invoke.getOuterResumeDest()))

655

656

657 Invoke.addIncomingPHIValuesFor(NewBB);

658

659

660 if (ResumeInst *RI = dyn_cast(BB->getTerminator()))

661 Invoke.forwardResume(RI, InlinedLPads);

662 }

663

664

665

666

667

669}

670

671

672

673

674

675

676

679 BasicBlock *UnwindDest = II->getUnwindDest();

681

683

684

685

686

690

691 UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));

692 }

693

694

695

698 for (Value *V : UnwindDestPHIValues) {

700 PHI->addIncoming(V, Src);

701 ++I;

702 }

703 };

704

705

706

709 BB != E; ++BB) {

710 if (auto *CRI = dyn_cast(BB->getTerminator())) {

711 if (CRI->unwindsToCaller()) {

712 auto *CleanupPad = CRI->getCleanupPad();

714 CRI->eraseFromParent();

716

717

718

719

720 assert(!FuncletUnwindMap.count(CleanupPad) ||

721 isa(FuncletUnwindMap[CleanupPad]));

722 FuncletUnwindMap[CleanupPad] =

724 }

725 }

726

728 if (I->isEHPad())

729 continue;

730

732 if (auto *CatchSwitch = dyn_cast(I)) {

733 if (CatchSwitch->unwindsToCaller()) {

734 Value *UnwindDestToken;

735 if (auto *ParentPad =

736 dyn_cast(CatchSwitch->getParentPad())) {

737

738

739

740

741

742

743

744

746 if (UnwindDestToken && !isa(UnwindDestToken))

747 continue;

748 } else {

749

750

751

752

753

754

755

757 }

759 CatchSwitch->getParentPad(), UnwindDest,

760 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),

761 CatchSwitch->getIterator());

762 for (BasicBlock *PadBB : CatchSwitch->handlers())

763 NewCatchSwitch->addHandler(PadBB);

764

765

766

767

768 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;

769 Replacement = NewCatchSwitch;

770 }

771 } else if (!isa(I)) {

773 }

774

775 if (Replacement) {

777 I->replaceAllUsesWith(Replacement);

778 I->eraseFromParent();

780 }

781 }

782

785 E = Caller->end();

786 BB != E; ++BB)

788 &*BB, UnwindDest, &FuncletUnwindMap))

789

790

792

793

794

795

796

798}

799

801 MDNode *CallsiteStackContext) {

804

805

806

807 for (auto MIBStackIter = MIBStackContext->op_begin(),

808 CallsiteStackIter = CallsiteStackContext->op_begin();

809 MIBStackIter != MIBStackContext->op_end() &&

810 CallsiteStackIter != CallsiteStackContext->op_end();

811 MIBStackIter++, CallsiteStackIter++) {

812 auto *Val1 = mdconst::dyn_extract(*MIBStackIter);

813 auto *Val2 = mdconst::dyn_extract(*CallsiteStackIter);

814 assert(Val1 && Val2);

815 if (Val1->getZExtValue() != Val2->getZExtValue())

816 return false;

817 }

818 return true;

819}

820

822 Call->setMetadata(LLVMContext::MD_memprof, nullptr);

823}

824

826 Call->setMetadata(LLVMContext::MD_callsite, nullptr);

827}

828

830 const std::vector<Metadata *> &MIBList) {

831 assert(!MIBList.empty());

832

833

836 for (Metadata *MIB : MIBList)

837 CallStack.addCallStack(cast(MIB));

838 bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);

839 assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));

840 if (!MemprofMDAttached)

841

843}

844

845

846

847

850 MDNode *InlinedCallsiteMD) {

851 MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);

852 MDNode *ClonedCallsiteMD = nullptr;

853

854

855 if (OrigCallsiteMD) {

856

857

858

859 ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);

860 ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);

861 }

862

863

864 MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);

865 if (!OrigMemProfMD)

866 return;

867

868

869 assert(OrigCallsiteMD);

870

871

872 std::vector<Metadata *> NewMIBList;

873

874

875

876

877 for (auto &MIBOp : OrigMemProfMD->operands()) {

878 MDNode *MIB = dyn_cast(MIBOp);

879

882

884

885 NewMIBList.push_back(MIB);

886 }

887 if (NewMIBList.empty()) {

890 return;

891 }

892 if (NewMIBList.size() < OrigMemProfMD->getNumOperands())

894}

895

896

897

898

899

900

901static void

903 bool ContainsMemProfMetadata,

906

907

908 if (!CallsiteMD && !ContainsMemProfMetadata)

909 return;

910

911

912 for (const auto &Entry : VMap) {

913

914

915 auto *OrigCall = dyn_cast_or_null(Entry.first);

916 auto *ClonedCall = dyn_cast_or_null(Entry.second);

917 if (!OrigCall || !ClonedCall)

918 continue;

919

920

921

922 if (!CallsiteMD) {

925 continue;

926 }

928 }

929}

930

931

932

933

936 MDNode *MemParallelLoopAccess =

937 CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);

938 MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);

939 MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);

941 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)

942 return;

943

946

947 if (I.mayReadOrWriteMemory())

948 continue;

949

950 if (MemParallelLoopAccess) {

951

953 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),

954 MemParallelLoopAccess);

955 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,

956 MemParallelLoopAccess);

957 }

958

959 if (AccessGroup)

961 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

962

963 if (AliasScope)

965 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

966

967 if (NoAlias)

969 I.getMetadata(LLVMContext::MD_noalias), NoAlias));

970 }

971 }

972}

973

974

979 if (I)

980 continue;

981

983 continue;

984

985

986 auto *CalledFn =

987 dyn_cast(I->getCalledOperand()->stripPointerCasts());

988 if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&

990 continue;

991

993 I->getOperandBundlesAsDefs(OpBundles);

994 OpBundles.emplace_back("funclet", CallSiteEHPad);

995

998 I->replaceAllUsesWith(NewInst);

999 I->eraseFromParent();

1000 }

1001}

1002

1003namespace {

1004

1005

1006

1007

1008class ScopedAliasMetadataDeepCloner {

1011 MetadataMap MDMap;

1012 void addRecursiveMetadataUses();

1013

1014public:

1015 ScopedAliasMetadataDeepCloner(const Function *F);

1016

1017

1018

1019 void clone();

1020

1021

1022

1024};

1025}

1026

1027ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(

1031 if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))

1032 MD.insert(M);

1033 if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))

1034 MD.insert(M);

1035

1036

1037 if (const auto *Decl = dyn_cast(&I))

1038 MD.insert(Decl->getScopeList());

1039 }

1040 }

1041 addRecursiveMetadataUses();

1042}

1043

1044void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {

1046 while (Queue.empty()) {

1047 const MDNode *M = cast(Queue.pop_back_val());

1048 for (const Metadata *Op : M->operands())

1049 if (const MDNode *OpMD = dyn_cast(Op))

1050 if (MD.insert(OpMD))

1051 Queue.push_back(OpMD);

1052 }

1053}

1054

1055void ScopedAliasMetadataDeepCloner::clone() {

1056 assert(MDMap.empty() && "clone() already called ?");

1057

1059 for (const MDNode *I : MD) {

1061 MDMap[I].reset(DummyNodes.back().get());

1062 }

1063

1064

1065

1066

1068 for (const MDNode *I : MD) {

1069 for (const Metadata *Op : I->operands()) {

1070 if (const MDNode *M = dyn_cast(Op))

1072 else

1074 }

1075

1077 MDTuple *TempM = cast(MDMap[I]);

1079

1082 }

1083}

1084

1087 if (MDMap.empty())

1088 return;

1089

1092

1093

1094 if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))

1095 if (MDNode *MNew = MDMap.lookup(M))

1096 I.setMetadata(LLVMContext::MD_alias_scope, MNew);

1097

1098 if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))

1099 if (MDNode *MNew = MDMap.lookup(M))

1100 I.setMetadata(LLVMContext::MD_noalias, MNew);

1101

1102 if (auto *Decl = dyn_cast(&I))

1103 if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))

1104 Decl->setScopeList(MNew);

1105 }

1106 }

1107}

1108

1109

1110

1111

1112

1117 return;

1118

1121

1122 for (const Argument &Arg : CalledFunc->args())

1123 if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())

1125

1126 if (NoAliasArgs.empty())

1127 return;

1128

1129

1130

1133

1134

1135

1136

1137

1138

1139

1142

1143

1146 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {

1147 const Argument *A = NoAliasArgs[i];

1148

1149 std::string Name = std::string(CalledFunc->getName());

1150 if (A->hasName()) {

1151 Name += ": %";

1152 Name += A->getName();

1153 } else {

1154 Name += ": argument ";

1155 Name += utostr(i);

1156 }

1157

1158

1159

1160

1162 NewScopes.insert(std::make_pair(A, NewScope));

1163

1165

1166

1168 auto *NoAliasDecl =

1170

1171

1172 (void)NoAliasDecl;

1173 }

1174 }

1175

1176

1177

1179 VMI != VMIE; ++VMI) {

1180 if (const Instruction *I = dyn_cast(VMI->first)) {

1181 if (!VMI->second)

1182 continue;

1183

1184 Instruction *NI = dyn_cast(VMI->second);

1185 if (!NI || InlinedFunctionInfo.isSimplified(I, NI))

1186 continue;

1187

1188 bool IsArgMemOnlyCall = false, IsFuncCall = false;

1190

1191 if (const LoadInst *LI = dyn_cast(I))

1192 PtrArgs.push_back(LI->getPointerOperand());

1193 else if (const StoreInst *SI = dyn_cast(I))

1194 PtrArgs.push_back(SI->getPointerOperand());

1195 else if (const VAArgInst *VAAI = dyn_cast(I))

1196 PtrArgs.push_back(VAAI->getPointerOperand());

1197 else if (const AtomicCmpXchgInst *CXI = dyn_cast(I))

1198 PtrArgs.push_back(CXI->getPointerOperand());

1199 else if (const AtomicRMWInst *RMWI = dyn_cast(I))

1200 PtrArgs.push_back(RMWI->getPointerOperand());

1201 else if (const auto *Call = dyn_cast(I)) {

1202

1203

1204

1205 if (Call->doesNotAccessMemory())

1206 continue;

1207

1208 IsFuncCall = true;

1209 if (CalleeAAR) {

1211

1212

1214 continue;

1215

1217 IsArgMemOnlyCall = true;

1218 }

1219

1220 for (Value *Arg : Call->args()) {

1221

1222

1223

1224 if (!Arg->getType()->isPointerTy())

1225 continue;

1226

1228 }

1229 }

1230

1231

1232

1233

1234

1235 if (PtrArgs.empty() && !IsFuncCall)

1236 continue;

1237

1238

1239

1240

1243

1244 for (const Value *V : PtrArgs) {

1247

1248 for (const Value *O : Objects)

1250 }

1251

1252

1253

1254 bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,

1255 UsesUnknownObject = false;

1256 for (const Value *V : ObjSet) {

1257

1258

1259

1260 bool IsNonPtrConst = isa(V) || isa(V) ||

1261 isa(V) ||

1262 isa(V) || isa(V);

1263 if (IsNonPtrConst)

1264 continue;

1265

1266

1267

1268

1269 if (const Argument *A = dyn_cast(V)) {

1270 if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))

1271 UsesAliasingPtr = true;

1272 } else {

1273 UsesAliasingPtr = true;

1274 }

1275

1277

1278

1279 RequiresNoCaptureBefore = true;

1281

1282

1283

1284

1285 UsesUnknownObject = true;

1286 }

1287 }

1288

1289

1290

1291 if (UsesUnknownObject)

1292 continue;

1293

1294

1295

1296 if (IsFuncCall && !IsArgMemOnlyCall)

1297 RequiresNoCaptureBefore = true;

1298

1299

1300

1301

1302

1303

1304

1305

1306

1307 for (const Argument *A : NoAliasArgs) {

1309 continue;

1310

1311

1312

1313

1314

1315 if (!RequiresNoCaptureBefore ||

1317 false, I, &DT))

1319 }

1320

1322 NI->setMetadata(LLVMContext::MD_noalias,

1324 NI->getMetadata(LLVMContext::MD_noalias),

1326

1327

1328

1329

1330

1331

1332

1333

1334

1335

1336

1337 bool CanAddScopes = !UsesAliasingPtr;

1338 if (CanAddScopes && IsFuncCall)

1339 CanAddScopes = IsArgMemOnlyCall;

1340

1341 if (CanAddScopes)

1342 for (const Argument *A : NoAliasArgs) {

1343 if (ObjSet.count(A))

1344 Scopes.push_back(NewScopes[A]);

1345 }

1346

1347 if (!Scopes.empty())

1349 LLVMContext::MD_alias_scope,

1352 }

1353 }

1354}

1355

1358

1360 "Expected to be in same basic block!");

1362 assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");

1365}

1366

1367

1368

1373 auto &Context = CalledFunction->getContext();

1374

1375

1377 bool HasAttrToPropagate = false;

1378

1379

1380

1381

1382

1383

1385 Attribute::Dereferenceable, Attribute::DereferenceableOrNull,

1386 Attribute::NonNull, Attribute::Alignment, Attribute::Range};

1387

1388 for (unsigned I = 0, E = CB.arg_size(); I < E; ++I) {

1391

1392

1394 ValidObjParamAttrs.back().addAttribute(Attribute::ReadNone);

1396 ValidObjParamAttrs.back().addAttribute(Attribute::ReadOnly);

1397

1401 ValidExactParamAttrs.back().addAttribute(Attr);

1402 }

1403

1404 HasAttrToPropagate |= ValidObjParamAttrs.back().hasAttributes();

1405 HasAttrToPropagate |= ValidExactParamAttrs.back().hasAttributes();

1406 }

1407

1408

1409 if (!HasAttrToPropagate)

1410 return;

1411

1412 for (BasicBlock &BB : *CalledFunction) {

1414 const auto *InnerCB = dyn_cast(&Ins);

1415 if (!InnerCB)

1416 continue;

1417 auto *NewInnerCB = dyn_cast_or_null(VMap.lookup(InnerCB));

1418 if (!NewInnerCB)

1419 continue;

1420

1421

1422 if (InlinedFunctionInfo.isSimplified(InnerCB, NewInnerCB))

1423 continue;

1424

1426 for (unsigned I = 0, E = InnerCB->arg_size(); I < E; ++I) {

1427

1428

1429

1430

1431 if (NewInnerCB->paramHasAttr(I, Attribute::ByVal))

1432 continue;

1433

1434

1435 if (match(NewInnerCB->getArgOperand(I),

1437 continue;

1438

1439

1440 const Argument *Arg = dyn_cast(InnerCB->getArgOperand(I));

1441 unsigned ArgNo;

1442 if (Arg) {

1444

1445

1446

1447

1448

1450 Context, AttributeSet::get(Context, ValidExactParamAttrs[ArgNo])};

1451 if (AL.getParamDereferenceableBytes(I) >

1452 NewAB.getDereferenceableBytes())

1454 if (AL.getParamDereferenceableOrNullBytes(I) >

1455 NewAB.getDereferenceableOrNullBytes())

1456 NewAB.removeAttribute(Attribute::DereferenceableOrNull);

1457 if (AL.getParamAlignment(I).valueOrOne() >

1458 NewAB.getAlignment().valueOrOne())

1460 if (auto ExistingRange = AL.getParamRange(I)) {

1461 if (auto NewRange = NewAB.getRange()) {

1464 NewAB.removeAttribute(Attribute::Range);

1465 NewAB.addRangeAttr(CombinedRange);

1466 }

1467 }

1468 AL = AL.addParamAttributes(Context, I, NewAB);

1469 } else if (NewInnerCB->getArgOperand(I)->getType()->isPointerTy()) {

1470

1471 const Value *UnderlyingV =

1473 Arg = dyn_cast(UnderlyingV);

1474 if (!Arg)

1475 continue;

1477 } else {

1478 continue;

1479 }

1480

1481

1482 AL = AL.addParamAttributes(Context, I, ValidObjParamAttrs[ArgNo]);

1483

1484

1485

1486

1487

1488

1489 if (AL.hasParamAttr(I, Attribute::ReadOnly) &&

1490 AL.hasParamAttr(I, Attribute::WriteOnly))

1491 AL = AL.addParamAttribute(Context, I, Attribute::ReadNone);

1492

1493

1494 if (AL.hasParamAttr(I, Attribute::ReadNone)) {

1495 AL = AL.removeParamAttribute(Context, I, Attribute::ReadOnly);

1496 AL = AL.removeParamAttribute(Context, I, Attribute::WriteOnly);

1497 }

1498

1499

1500 if (AL.hasParamAttr(I, Attribute::ReadOnly) ||

1501 AL.hasParamAttr(I, Attribute::ReadNone))

1502 AL = AL.removeParamAttribute(Context, I, Attribute::Writable);

1503 }

1504 NewInnerCB->setAttributes(AL);

1505 }

1506 }

1507}

1508

1509

1510

1511

1512

1513

1514

1521 if (CB.hasRetAttr(Attribute::NoAlias))

1523 if (CB.hasRetAttr(Attribute::NoUndef))

1525 return Valid;

1526}

1527

1528

1529

1532 if (CB.hasRetAttr(Attribute::NonNull))

1534 if (CB.hasRetAttr(Attribute::Alignment))

1536 if (std::optional Range = CB.getRange())

1538 return Valid;

1539}

1540

1546 return;

1548 auto &Context = CalledFunction->getContext();

1549

1550 for (auto &BB : *CalledFunction) {

1551 auto *RI = dyn_cast(BB.getTerminator());

1552 if (!RI || !isa(RI->getOperand(0)))

1553 continue;

1554 auto *RetVal = cast(RI->getOperand(0));

1555

1556

1557

1558 auto *NewRetVal = dyn_cast_or_null(VMap.lookup(RetVal));

1559 if (!NewRetVal)

1560 continue;

1561

1562

1563

1564 if (InlinedFunctionInfo.isSimplified(RetVal, NewRetVal))

1565 continue;

1566

1567

1568

1569

1570

1571

1572

1573

1574

1575

1576

1577

1578

1579

1580

1581

1582

1583

1584 if (RI->getParent() != RetVal->getParent() ||

1586 continue;

1587

1588

1589

1590

1591

1592

1597 AL.getRetDereferenceableOrNullBytes())

1598 ValidUB.removeAttribute(Attribute::DereferenceableOrNull);

1599 AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);

1600

1601

1602

1603

1604

1605

1606

1607

1608

1609

1610

1611

1612

1613

1614

1615

1616

1617

1618

1619

1620

1621

1622

1623

1624

1625

1626

1627

1628

1629

1630

1631

1636 if (CBRange.isValid()) {

1637 Attribute NewRange = AL.getRetAttr(Attribute::Range);

1638 if (NewRange.isValid()) {

1641 }

1642 }

1643

1644

1645

1646

1647

1648

1649

1650

1651

1652

1653

1654 if (CB.hasRetAttr(Attribute::NoUndef) ||

1655 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))

1657 }

1658 NewRetVal->setAttributes(NewAL);

1659 }

1660}

1661

1662

1663

1666 return;

1667

1670

1671

1672

1674 bool DTCalculated = false;

1675

1678 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||

1679 Arg.hasNUses(0))

1680 continue;

1681 MaybeAlign Alignment = Arg.getParamAlign();

1682 if (!Alignment)

1683 continue;

1684

1685 if (!DTCalculated) {

1687 DTCalculated = true;

1688 }

1689

1690

1693 continue;

1694

1696 DL, ArgVal, Alignment->value());

1698 }

1699}

1700

1706

1708 Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

1709

1710

1711

1712

1715

1716

1717

1718

1721 CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));

1722}

1723

1724

1725

1732 const DataLayout &DL = Caller->getDataLayout();

1733

1734

1735

1736

1738

1739

1740

1741 if (ByValAlignment.valueOrOne() == 1)

1742 return Arg;

1743

1746

1747

1748

1750 *ByValAlignment)

1751 return Arg;

1752

1753

1754

1755 }

1756

1757

1758 Align Alignment = DL.getPrefTypeAlign(ByValType);

1759

1760

1761

1762

1763 if (ByValAlignment)

1764 Alignment = std::max(Alignment, *ByValAlignment);

1765

1768 nullptr, Alignment, Arg->getName());

1769 NewAlloca->insertBefore(Caller->begin()->begin());

1771

1772

1773

1774 return NewAlloca;

1775}

1776

1777

1779 for (User *U : V->users())

1781 if (II->isLifetimeStartOrEnd())

1782 return true;

1783 return false;

1784}

1785

1786

1787

1790 Type *Int8PtrTy =

1792 if (Ty == Int8PtrTy)

1794

1795

1797 if (U->getType() != Int8PtrTy) continue;

1798 if (U->stripPointerCasts() != AI) continue;

1800 return true;

1801 }

1802 return false;

1803}

1804

1805

1806

1807

1810}

1811

1812

1813

1818 return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),

1820}

1821

1822

1823

1825 Instruction *TheCall, bool CalleeHasDebugInfo) {

1827 if (!TheCallDL)

1828 return;

1829

1831 DILocation *InlinedAtNode = TheCallDL;

1832

1833

1834

1835 InlinedAtNode = DILocation::getDistinct(

1836 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),

1837 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

1838

1839

1840

1841

1843

1844

1845

1846 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

1847

1848

1850

1851

1852 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,

1854 if (auto *Loc = dyn_cast_or_null(MD))

1856 return MD;

1857 };

1859

1860 if (!NoInlineLineTables)

1864 I.setDebugLoc(IDL);

1865 return;

1866 }

1867

1868 if (CalleeHasDebugInfo && !NoInlineLineTables)

1869 return;

1870

1871

1872

1873

1874

1875

1876

1877

1878 if (auto *AI = dyn_cast(&I))

1880 return;

1881

1882

1883

1884

1885 if (isa(I))

1886 return;

1887

1888 I.setDebugLoc(TheCallDL);

1889 };

1890

1891

1892 auto UpdateDVR = [&](DbgRecord *DVR) {

1893 assert(DVR->getDebugLoc() && "Debug Value must have debug loc");

1894 if (NoInlineLineTables) {

1895 DVR->setDebugLoc(TheCallDL);

1896 return;

1897 }

1901 DVR->getMarker()->getParent()->getContext(), IANodes);

1902 DVR->setDebugLoc(IDL);

1903 };

1904

1905

1906 for (; FI != Fn->end(); ++FI) {

1908 UpdateInst(I);

1909 for (DbgRecord &DVR : I.getDbgRecordRange()) {

1910 UpdateDVR(&DVR);

1911 }

1912 }

1913

1914

1915 if (NoInlineLineTables) {

1917 while (BI != FI->end()) {

1918 if (isa(BI)) {

1919 BI = BI->eraseFromParent();

1920 continue;

1921 } else {

1922 BI->dropDbgRecords();

1923 }

1924 ++BI;

1925 }

1926 }

1927 }

1928}

1929

1930#undef DEBUG_TYPE

1931#define DEBUG_TYPE "assignment-tracking"

1932

1937

1939 errs() << "# Finding caller local variables escaped by callee\n");

1940 for (const Value *Arg : CB.args()) {

1942 if (!Arg->getType()->isPointerTy()) {

1944 continue;

1945 }

1946

1947 const Instruction *I = dyn_cast(Arg);

1948 if (I) {

1949 LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");

1950 continue;

1951 }

1952

1953

1954 assert(Arg->getType()->isPtrOrPtrVectorTy());

1955 APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);

1957 Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));

1958 if (Base) {

1959 LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");

1960 continue;

1961 }

1962

1965

1967 continue;

1968

1969

1970 auto CollectAssignsForStorage = [&](auto *DbgAssign) {

1971

1972 if (DbgAssign->getDebugLoc().getInlinedAt())

1973 return;

1974 LLVM_DEBUG(errs() << " > DEF : " << *DbgAssign << "\n");

1976 };

1979 }

1980 return EscapedLocals;

1981}

1982

1986 << Start->getParent()->getName() << " from "

1990}

1991

1992

1993

1994

1997

1998

1999 for (auto BBI = Start; BBI != End; ++BBI) {

2002 }

2003}

2004#undef DEBUG_TYPE

2005#define DEBUG_TYPE "inline-function"

2006

2007

2008

2009

2010

2011

2012

2017 const BasicBlock &CalleeEntryBlock) {

2019 for (auto Entry : VMap) {

2020 if (!isa(Entry.first) || !Entry.second)

2021 continue;

2022 auto *OrigBB = cast(Entry.first);

2023 auto *ClonedBB = cast(Entry.second);

2025 if (!ClonedBBs.insert(ClonedBB).second) {

2026

2027

2028

2030 if (NewFreq > Freq)

2031 Freq = NewFreq;

2032 }

2034 }

2035 BasicBlock *EntryClone = cast(VMap.lookup(&CalleeEntryBlock));

2037 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);

2038}

2039

2040

2046 return;

2047 auto CallSiteCount =

2048 PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;

2049 int64_t CallCount =

2050 std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());

2052}

2053

2055 Function *Callee, int64_t EntryDelta,

2057 auto CalleeCount = Callee->getEntryCount();

2058 if (!CalleeCount)

2059 return;

2060

2061 const uint64_t PriorEntryCount = CalleeCount->getCount();

2062

2063

2064

2065 const uint64_t NewEntryCount =

2066 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)

2067 ? 0

2068 : PriorEntryCount + EntryDelta;

2069

2070 auto updateVTableProfWeight = [](CallBase *CB, const uint64_t NewEntryCount,

2071 const uint64_t PriorEntryCount) {

2073 if (VPtr)

2074 scaleProfData(*VPtr, NewEntryCount, PriorEntryCount);

2075 };

2076

2077

2078 if (VMap) {

2079 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;

2080 for (auto Entry : *VMap) {

2081 if (isa(Entry.first))

2082 if (auto *CI = dyn_cast_or_null(Entry.second)) {

2083 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);

2084 updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);

2085 }

2086

2087 if (isa(Entry.first))

2088 if (auto *II = dyn_cast_or_null(Entry.second)) {

2089 II->updateProfWeight(CloneEntryCount, PriorEntryCount);

2090 updateVTableProfWeight(II, CloneEntryCount, PriorEntryCount);

2091 }

2092 }

2093 }

2094

2095 if (EntryDelta) {

2096 Callee->setEntryCount(NewEntryCount);

2097

2099

2100 if (!VMap || VMap->count(&BB))

2102 if (CallInst *CI = dyn_cast(&I)) {

2103 CI->updateProfWeight(NewEntryCount, PriorEntryCount);

2104 updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);

2105 }

2106 if (InvokeInst *II = dyn_cast(&I)) {

2107 II->updateProfWeight(NewEntryCount, PriorEntryCount);

2108 updateVTableProfWeight(II, NewEntryCount, PriorEntryCount);

2109 }

2110 }

2111 }

2112}

2113

2114

2115

2116

2117

2118

2119

2120

2121

2122

2123

2124

2125

2126

2127

2128

2129

2130

2131static void

2135 bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,

2136 IsUnsafeClaimRV = !IsRetainRV;

2137

2138 for (auto *RI : Returns) {

2140 bool InsertRetainCall = IsRetainRV;

2142

2143

2144

2148

2149 if (isa(I))

2150 continue;

2151

2152 if (auto *II = dyn_cast(&I)) {

2153 if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||

2154 II->hasNUses(0) ||

2156 break;

2157

2158

2159

2160

2161

2162

2163 if (IsUnsafeClaimRV) {

2165 Builder.CreateIntrinsic(Intrinsic::objc_release, {}, RetOpnd);

2166 }

2167 II->eraseFromParent();

2168 InsertRetainCall = false;

2169 break;

2170 }

2171

2172 auto *CI = dyn_cast(&I);

2173

2174 if (!CI)

2175 break;

2176

2179 break;

2180

2181

2182

2187 NewCall->copyMetadata(*CI);

2188 CI->replaceAllUsesWith(NewCall);

2189 CI->eraseFromParent();

2190 InsertRetainCall = false;

2191 break;

2192 }

2193

2194 if (InsertRetainCall) {

2195

2196

2197

2199 Builder.CreateIntrinsic(Intrinsic::objc_retain, {}, RetOpnd);

2200 }

2201 }

2202}

2203

2204

2205

2206

2207

2208

2209

2210

2211

2212

2213

2214

2215

2216

2217

2218

2219

2220

2221

2222

2223

2224

2225static const std::pair<std::vector<int64_t>, std::vector<int64_t>>

2229

2230

2231

2232

2233 std::vector<int64_t> CalleeCounterMap;

2234 std::vector<int64_t> CalleeCallsiteMap;

2235 CalleeCounterMap.resize(CalleeCounters, -1);

2236 CalleeCallsiteMap.resize(CalleeCallsites, -1);

2237

2239 if (Ins.getNameValue() == &Caller)

2240 return false;

2241 const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());

2242 if (CalleeCounterMap[OldID] == -1)

2244 const auto NewID = static_cast<uint32_t>(CalleeCounterMap[OldID]);

2245

2246 Ins.setNameValue(&Caller);

2247 Ins.setIndex(NewID);

2248 return true;

2249 };

2250

2251 auto RewriteCallsiteInsIfNeeded = [&](InstrProfCallsite &Ins) -> bool {

2252 if (Ins.getNameValue() == &Caller)

2253 return false;

2254 const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());

2255 if (CalleeCallsiteMap[OldID] == -1)

2257 const auto NewID = static_cast<uint32_t>(CalleeCallsiteMap[OldID]);

2258

2259 Ins.setNameValue(&Caller);

2260 Ins.setIndex(NewID);

2261 return true;

2262 };

2263

2264 std::deque<BasicBlock *> Worklist;

2266

2267

2268

2269

2270

2271

2272

2273

2274

2275

2276

2277

2278

2279

2280

2281 Worklist.push_back(StartBB);

2282 while (!Worklist.empty()) {

2283 auto *BB = Worklist.front();

2284 Worklist.pop_front();

2285 bool Changed = false;

2287 if (BBID) {

2288 Changed |= RewriteInstrIfNeeded(*BBID);

2289

2290

2291

2292 BBID->moveBefore(BB->getFirstInsertionPt());

2293 }

2295 if (auto *Inc = dyn_cast(&I)) {

2296 if (isa(Inc)) {

2297

2298

2299

2300

2301

2302

2303 if (isa(Inc->getStep())) {

2304 assert(!Inc->getNextNode() || !isa(Inc->getNextNode()));

2305 Inc->eraseFromParent();

2306 } else {

2307 assert(isa_and_nonnull(Inc->getNextNode()));

2308 RewriteInstrIfNeeded(*Inc);

2309 }

2310 } else if (Inc != BBID) {

2311

2312

2313

2314

2315 Inc->eraseFromParent();

2316 Changed = true;

2317 }

2318 } else if (auto *CS = dyn_cast(&I)) {

2319 Changed |= RewriteCallsiteInsIfNeeded(*CS);

2320 }

2321 }

2322 if (!BBID || Changed)

2324 if (Seen.insert(Succ).second)

2325 Worklist.push_back(Succ);

2326 }

2327

2329 llvm::all_of(CalleeCounterMap, [&](const auto &V) { return V != 0; }) &&

2330 "Counter index mapping should be either to -1 or to non-zero index, "

2331 "because the 0 "

2332 "index corresponds to the entry BB of the caller");

2334 llvm::all_of(CalleeCallsiteMap, [&](const auto &V) { return V != 0; }) &&

2335 "Callsite index mapping should be either to -1 or to non-zero index, "

2336 "because there should have been at least a callsite - the inlined one "

2337 "- which would have had a 0 index.");

2338

2339 return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};

2340}

2341

2342

2343

2344

2345

2346

2347

2348

2349

2350

2351

2352

2353

2354

2355

2358 bool MergeAttributes,

2360 bool InsertLifetime,

2361 Function *ForwardVarArgsTo) {

2362 if (!CtxProf)

2363 return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,

2364 ForwardVarArgsTo);

2365

2369

2370

2371

2372

2375 const auto CallsiteID =

2376 static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());

2377

2378 const auto NumCalleeCounters = CtxProf.getNumCounters(Callee);

2379 const auto NumCalleeCallsites = CtxProf.getNumCallsites(Callee);

2380

2381 auto Ret = InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,

2382 ForwardVarArgsTo);

2383 if (!Ret.isSuccess())

2384 return Ret;

2385

2386

2387

2388 CallsiteIDIns->eraseFromParent();

2389

2390

2391

2392

2393 const auto IndicesMaps = remapIndices(Caller, StartBB, CtxProf,

2394 NumCalleeCounters, NumCalleeCallsites);

2396

2399 const auto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;

2401 (Ctx.counters().size() +

2402 llvm::count_if(CalleeCounterMap, [](auto V) { return V != -1; }) ==

2403 NewCountersSize) &&

2404 "The caller's counters size should have grown by the number of new "

2405 "distinct counters inherited from the inlined callee.");

2406 Ctx.resizeCounters(NewCountersSize);

2407

2408

2409

2410 auto CSIt = Ctx.callsites().find(CallsiteID);

2411 if (CSIt == Ctx.callsites().end())

2412 return;

2413 auto CalleeCtxIt = CSIt->second.find(CalleeGUID);

2414

2415

2416 if (CalleeCtxIt == CSIt->second.end())

2417 return;

2418

2419

2420

2421 auto &CalleeCtx = CalleeCtxIt->second;

2422 assert(CalleeCtx.guid() == CalleeGUID);

2423

2424 for (auto I = 0U; I < CalleeCtx.counters().size(); ++I) {

2425 const int64_t NewIndex = CalleeCounterMap[I];

2426 if (NewIndex >= 0) {

2427 assert(NewIndex != 0 && "counter index mapping shouldn't happen to a 0 "

2428 "index, that's the caller's entry BB");

2429 Ctx.counters()[NewIndex] = CalleeCtx.counters()[I];

2430 }

2431 }

2432 for (auto &[I, OtherSet] : CalleeCtx.callsites()) {

2433 const int64_t NewCSIdx = CalleeCallsiteMap[I];

2434 if (NewCSIdx >= 0) {

2435 assert(NewCSIdx != 0 &&

2436 "callsite index mapping shouldn't happen to a 0 index, the "

2437 "caller must've had at least one callsite (with such an index)");

2438 Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));

2439 }

2440 }

2441

2442

2443

2444 auto Deleted = Ctx.callsites().erase(CallsiteID);

2447 };

2448 CtxProf.update(Updater, Caller);

2449 return Ret;

2450}

2451

2452

2453

2454

2455

2456

2457

2458

2459

2461 bool MergeAttributes,

2463 bool InsertLifetime,

2464 Function *ForwardVarArgsTo) {

2466

2467

2468 if (isa(CB))

2470

2471

2473

2475 if (!CalledFunc ||

2478

2479

2480

2481 Value *ConvergenceControlToken = nullptr;

2485 uint32_t Tag = OBUse.getTagID();

2486

2488 continue;

2489

2491 continue;

2493 continue;

2495 continue;

2497 ConvergenceControlToken = OBUse.Inputs[0].get();

2498 continue;

2499 }

2500

2502 }

2503 }

2504

2505

2506

2507

2508

2509

2510

2511

2512

2513

2515 if (!ConvergenceControlToken &&

2518 "convergent call needs convergencectrl operand");

2519 }

2520 }

2521

2522

2523

2525

2528

2529

2530

2531

2532

2533 if (CalledFunc->hasGC()) {

2534 if (!Caller->hasGC())

2535 Caller->setGC(CalledFunc->getGC());

2536 else if (CalledFunc->getGC() != Caller->getGC())

2538 }

2539

2540

2541 Constant *CalledPersonality =

2544 : nullptr;

2545

2546

2547

2548

2549 Constant *CallerPersonality =

2550 Caller->hasPersonalityFn()

2551 ? Caller->getPersonalityFn()->stripPointerCasts()

2552 : nullptr;

2553 if (CalledPersonality) {

2554 if (!CallerPersonality)

2555 Caller->setPersonalityFn(CalledPersonality);

2556

2557

2558

2559

2560 else if (CalledPersonality != CallerPersonality)

2562 }

2563

2564

2565

2567 if (CallerPersonality) {

2570 std::optional ParentFunclet =

2572 if (ParentFunclet)

2573 CallSiteEHPad = cast(ParentFunclet->Inputs.front());

2574

2575

2576

2577 if (CallSiteEHPad) {

2578 if (Personality == EHPersonality::MSVC_CXX) {

2579

2580

2581 if (isa(CallSiteEHPad)) {

2582

2583

2584 for (const BasicBlock &CalledBB : *CalledFunc) {

2585 if (isa(CalledBB.getFirstNonPHIIt()))

2587 }

2588 }

2590

2591

2592 for (const BasicBlock &CalledBB : *CalledFunc) {

2593 if (CalledBB.isEHPad())

2595 }

2596 }

2597 }

2598 }

2599 }

2600

2601

2602

2603 bool EHPadForCallUnwindsLocally = false;

2604 if (CallSiteEHPad && isa(CB)) {

2606 Value *CallSiteUnwindDestToken =

2608

2609 EHPadForCallUnwindsLocally =

2610 CallSiteUnwindDestToken &&

2611 !isa(CallSiteUnwindDestToken);

2612 }

2613

2614

2615

2617

2618

2619

2623

2624 {

2626 struct ByValInit {

2630 };

2631

2633

2634

2635

2636

2637

2638

2639

2640 ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());

2641

2642 auto &DL = Caller->getDataLayout();

2643

2644

2645

2647 unsigned ArgNo = 0;

2649 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {

2650 Value *ActualArg = *AI;

2651

2652

2653

2654

2655

2658 &CB, CalledFunc, IFI,

2660 if (ActualArg != *AI)

2663 }

2664

2665 VMap[&*I] = ActualArg;

2666 }

2667

2668

2669

2670

2671

2673

2676

2677

2679

2680

2681

2682

2683

2685 false, Returns, ".i",

2686 &InlinedFunctionInfo);

2687

2688 FirstNewBlock = LastBlock; ++FirstNewBlock;

2689

2690

2692 if (RVCallKind != objcarc::ARCInstKind::None)

2694

2695

2696

2697

2698

2701

2703 CalledFunc->front());

2704

2708 }

2709

2710

2711 for (ByValInit &Init : ByValInits)

2713 &*FirstNewBlock, IFI, CalledFunc);

2714

2715 std::optional ParentDeopt =

2717 if (ParentDeopt) {

2719

2721 CallBase *ICS = dyn_cast_or_null(VH);

2722 if (!ICS)

2723 continue;

2724

2726

2728

2730 ++COBi) {

2733

2735 continue;

2736 }

2737

2738

2739

2740

2741

2742 std::vector<Value *> MergedDeoptArgs;

2743 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +

2744 ChildOB.Inputs.size());

2745

2748

2749 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));

2750 }

2751

2753

2754

2755

2757

2758 VH = nullptr;

2760 }

2761 }

2762

2763

2764

2765

2768

2770

2772

2773

2774

2776 }

2777

2778

2779 SAMetadataCloner.clone();

2780 SAMetadataCloner.remap(FirstNewBlock, Caller->end());

2781

2782

2784

2785

2786

2788

2789

2790

2792

2795

2796

2798

2799

2802 make_range(FirstNewBlock->getIterator(), Caller->end()))

2804 if (auto *II = dyn_cast(&I))

2806 }

2807

2808 if (ConvergenceControlToken) {

2810 if (IntrinsicCall) {

2813 }

2814 }

2815

2816

2817

2818

2819

2820 {

2823 E = FirstNewBlock->end(); I != E; ) {

2824 AllocaInst *AI = dyn_cast(I++);

2825 if (!AI) continue;

2826

2827

2828

2831 continue;

2832 }

2833

2835 continue;

2836

2837

2839

2840

2841

2842 while (isa(I) &&

2843 !cast(I)->use_empty() &&

2846 ++I;

2847 }

2848

2849

2850

2851

2852 I.setTailBit(true);

2853 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,

2855 }

2856 }

2857

2864 }

2865

2866 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;

2869 if (CallInst *CI = dyn_cast(&CB))

2870 CallSiteTailKind = CI->getTailCallKind();

2871

2872

2875

2876 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;

2877 ++BB) {

2879 CallInst *CI = dyn_cast(&I);

2880 if (!CI)

2881 continue;

2882

2883

2884

2885 if (!VarArgsToForward.empty() &&

2886 ((ForwardVarArgsTo &&

2889

2892 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {

2893 for (unsigned ArgNo = 0;

2895 ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

2896 }

2897

2898

2899 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());

2901 Attrs.getRetAttrs(), ArgAttrs);

2902

2904 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());

2912 CI = NewCI;

2913 }

2914

2916 InlinedDeoptimizeCalls |=

2917 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

2918

2919

2920

2921

2922

2923

2924

2925

2926

2927

2928

2929

2930

2931

2932

2933

2936 ChildTCK = std::min(CallSiteTailKind, ChildTCK);

2939

2940

2941

2942

2943

2946 }

2947 }

2948 }

2949

2950

2951

2952

2953

2954

2955 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&

2957 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());

2959

2961 continue;

2962

2963

2964

2966 continue;

2967

2968

2971 dyn_cast(AI->getArraySize())) {

2972 auto &DL = Caller->getDataLayout();

2974 TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);

2975 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

2976

2977

2978 if (AllocaArraySize == 0)

2979 continue;

2980

2981

2982

2984 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&

2985 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=

2988 AllocaArraySize * AllocaTypeSize);

2989 }

2990 }

2991

2994

2995

2996 if (InlinedMustTailCalls &&

2997 RI->getParent()->getTerminatingMustTailCall())

2998 continue;

2999 if (InlinedDeoptimizeCalls &&

3000 RI->getParent()->getTerminatingDeoptimizeCall())

3001 continue;

3003 }

3004 }

3005 }

3006

3007

3008

3010

3011 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())

3013

3014

3015

3017

3018

3019 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())

3020 continue;

3021 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())

3022 continue;

3024 }

3025 }

3026

3027

3028

3029

3030

3031 if (auto *II = dyn_cast(&CB)) {

3032 BasicBlock *UnwindDest = II->getUnwindDest();

3034 if (isa(FirstNonPHI)) {

3036 } else {

3038 }

3039 }

3040

3041

3042

3043

3044 if (CallSiteEHPad) {

3046 E = Caller->end();

3047 BB != E; ++BB) {

3048

3050

3051

3052

3053

3054

3055 if (auto *CleanupRet = dyn_cast(BB->getTerminator()))

3056 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)

3058

3060 if (I->isEHPad())

3061 continue;

3062

3063 if (auto *CatchSwitch = dyn_cast(I)) {

3064 if (isa(CatchSwitch->getParentPad()))

3065 CatchSwitch->setParentPad(CallSiteEHPad);

3066 } else {

3067 auto *FPI = cast(I);

3068 if (isa(FPI->getParentPad()))

3069 FPI->setParentPad(CallSiteEHPad);

3070 }

3071 }

3072 }

3073

3074 if (InlinedDeoptimizeCalls) {

3075

3076

3077

3078

3079

3080 if (Caller->getReturnType() == CB.getType()) {

3082 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;

3083 });

3084 } else {

3087 Caller->getParent(), Intrinsic::experimental_deoptimize,

3088 {Caller->getReturnType()});

3089

3091 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();

3092 if (!DeoptCall) {

3094 continue;

3095 }

3096

3097

3098

3099

3100

3101

3106

3108

3111 auto DeoptAttributes = DeoptCall->getAttributes();

3114 "Expected at least the deopt operand bundle");

3115

3118 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);

3123 else

3124 Builder.CreateRet(NewDeoptCall);

3125

3128 }

3129

3130

3131 std::swap(Returns, NormalReturns);

3132 }

3133 }

3134

3135

3136

3137

3138

3139 if (InlinedMustTailCalls) {

3140

3141 Type *NewRetTy = Caller->getReturnType();

3142 bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;

3143

3144

3147 CallInst *ReturnedMustTail =

3148 RI->getParent()->getTerminatingMustTailCall();

3149 if (!ReturnedMustTail) {

3151 continue;

3152 }

3153 if (!NeedBitCast)

3154 continue;

3155

3156

3158 auto *OldCast = dyn_cast_or_null(RI->getReturnValue());

3160 if (OldCast)

3161 OldCast->eraseFromParent();

3162

3163

3166 }

3167

3168

3169 std::swap(Returns, NormalReturns);

3170 }

3171

3172

3173

3174

3175

3176

3177

3179

3181 make_range(FirstNewBlock->getIterator(), Caller->end()))

3183 if (auto *CB = dyn_cast(&I))

3187 }

3188

3189

3190

3191

3192 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {

3193

3194 OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),

3195 FirstNewBlock->end());

3196

3197 Caller->back().eraseFromParent();

3198

3199

3200

3201 if (InvokeInst *II = dyn_cast(&CB)) {

3204 }

3205

3206

3207

3210 if (&CB == R->getReturnValue())

3212 else

3214 }

3215

3217

3218

3219 Returns[0]->eraseFromParent();

3220

3221 if (MergeAttributes)

3223

3224

3226 }

3227

3228

3229

3230

3231

3232

3233

3235 BranchInst *CreatedBranchToNormalDest = nullptr;

3236 if (InvokeInst *II = dyn_cast(&CB)) {

3237

3238

3240

3241

3242

3243

3244 AfterCallBB =

3246 CalledFunc->getName() + ".exit");

3247

3248 } else {

3249

3250

3251

3253 CalledFunc->getName() + ".exit");

3254 }

3255

3257

3260 }

3261

3262

3263

3264

3267 "splitBasicBlock broken!");

3269

3270

3271

3272

3273 Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,

3274 Caller->end());

3275

3276

3277

3279

3281 if (Returns.size() > 1) {

3282

3283

3286 PHI->insertBefore(AfterCallBB->begin());

3287

3288

3290 }

3291

3292

3293

3294 if (PHI) {

3297 "Ret value not consistent in function!");

3298 PHI->addIncoming(RI->getReturnValue(), RI->getParent());

3299 }

3300 }

3301

3302

3309 }

3310

3311

3312

3313

3314 if (CreatedBranchToNormalDest)

3315 CreatedBranchToNormalDest->setDebugLoc(Loc);

3316 } else if (!Returns.empty()) {

3317

3318

3320 if (&CB == Returns[0]->getReturnValue())

3322 else

3324 }

3325

3326

3327 BasicBlock *ReturnBB = Returns[0]->getParent();

3329

3330

3331

3332 AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);

3333

3334 if (CreatedBranchToNormalDest)

3336

3337

3338 Returns[0]->eraseFromParent();

3341

3342

3344 }

3345

3346

3348

3349

3350

3351 if (InlinedMustTailCalls && pred_empty(AfterCallBB))

3353

3354

3355

3356 assert(cast(Br)->isUnconditional() && "splitBasicBlock broken!");

3357 BasicBlock *CalleeEntry = cast(Br)->getSuccessor(0);

3358

3359

3360

3363

3364

3366

3367

3369

3370

3371

3372

3373 if (PHI) {

3376 auto &DL = Caller->getDataLayout();

3378 PHI->replaceAllUsesWith(V);

3379 PHI->eraseFromParent();

3380 }

3381 }

3382

3383 if (MergeAttributes)

3385

3387}

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

This file contains the simple types necessary to represent the attributes associated with functions and their calls.

static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)

Update the PHI nodes in OrigBB to include the values coming from NewBB.

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)

This file provides interfaces used to build and manipulate a call graph, which is a very useful tool for interprocedural optimization.

This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values that live in LLVM.

This file defines the DenseMap class.

This file provides various utilities for inspecting and working with the control flow graph in LLVM IR.

Module.h This file contains the declarations for the Module class.

static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)

static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)

Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.

static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)

Update inlined instructions' line numbers to encode the location where these instructions are inlined.

static void removeCallsiteMetadata(CallBase *Call)

static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD)

static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)

Given an EH pad, find where it unwinds.

static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))

static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)

If we inlined an invoke site, we need to convert calls in the body of the inlined function into invokes.

static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)

Helper for getUnwindDestToken that does the descendant-ward part of the search.

static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)

When we inline a basic block into an invoke, we have to turn all of the calls that can throw into invokes.

static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)

Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.

static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))

static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)

When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group, !alias.scope or !noalias metadata, that metadata should be propagated to all memory-accessing cloned instructions.

static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)

static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap)

static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)

Update the branch metadata for cloned call instructions.

static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)

Update the block frequencies of the caller after a callee has been inlined.

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)

static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)

static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)

static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))

static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)

If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...

static const std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)

static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)

static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)

If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...

static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)

An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...

static Value * getParentPad(Value *EHPad)

Helper for getUnwindDestToken/getUnwindDestTokenHelper.

static void fixupAssignments(Function::iterator Start, Function::iterator End)

Update inlined instructions' DIAssignID metadata.

static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)

Return the result of AI->isStaticAlloca() if AI were moved to the entry block.

static bool isUsedByLifetimeMarker(Value *V)

static void removeMemProfMetadata(CallBase *Call)

static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)

When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by adding it.

static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)

If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumptions to preserve this information.

static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)

static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))

static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)

static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)

static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)

Bundle operands of the inlined function must be added to inlined call sites.

static bool hasLifetimeMarkers(AllocaInst *AI)

static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList)

static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)

Return the first found DebugLoc that has a DILocation, given a range of instructions.

ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))

uint64_t IntrinsicInst * II

This file defines common analysis utilities used by the ObjC ARC Optimizer.

This file defines ARC utility functions which are used by various parts of the compiler.

This file contains the declarations for profiling metadata utility functions.

assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())

This file implements a set that has insertion order iteration characteristics.

This file defines the SmallPtrSet class.

This file defines the SmallVector class.

MemoryEffects getMemoryEffects(const CallBase *Call)

Return the behavior of the given call site.

Class for arbitrary precision integers.

an instruction to allocate memory on the stack

bool isSwiftError() const

Return true if this alloca is used as a swifterror argument to a call.

PointerType * getType() const

Overload to return most specific pointer type.

Type * getAllocatedType() const

Return the type that is being allocated by the instruction.

bool isUsedWithInAlloca() const

Return true if this alloca is used as an inalloca argument to a call.

const Value * getArraySize() const

Get the number of elements allocated.

This class represents an incoming formal argument to a Function.

unsigned getArgNo() const

Return the index of this formal argument in its containing function.

static uint64_t getGUID(const Function &F)

A cache of @llvm.assume calls within a function.

void registerAssumption(AssumeInst *CI)

Add an @llvm.assume intrinsic to this function's cache.

An instruction that atomically checks whether a specified value is in a memory location,...

an instruction that atomically reads a memory location, combines it with another value,...

AttrBuilder & addAlignmentAttr(MaybeAlign Align)

This turns an alignment into the form used internally in Attribute.

Attribute getAttribute(Attribute::AttrKind Kind) const

Return Attribute with the given Kind.

uint64_t getDereferenceableBytes() const

Retrieve the number of dereferenceable bytes, if the dereferenceable attribute exists (zero is returned otherwise).

bool hasAttributes() const

Return true if the builder has IR-level attributes.

AttrBuilder & addAttribute(Attribute::AttrKind Val)

Add an attribute to the builder.

MaybeAlign getAlignment() const

Retrieve the alignment attribute, if it exists.

AttrBuilder & addDereferenceableAttr(uint64_t Bytes)

This turns the number of dereferenceable bytes into the form used internally in Attribute.

uint64_t getDereferenceableOrNullBytes() const

Retrieve the number of dereferenceable_or_null bytes, if the dereferenceable_or_null attribute exists (zero is returned otherwise).

AttrBuilder & removeAttribute(Attribute::AttrKind Val)

Remove an attribute from the builder.

AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)

This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.

AttrBuilder & addRangeAttr(const ConstantRange &CR)

Add range attribute.

AttributeList addRetAttributes(LLVMContext &C, const AttrBuilder &B) const

Add a return value attribute to the list.

static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)

Create an AttributeList with the specified parameters in it.

AttributeSet getParamAttrs(unsigned ArgNo) const

The attributes for the argument or parameter at the given index are returned.

AttributeSet removeAttribute(LLVMContext &C, Attribute::AttrKind Kind) const

Remove the specified attribute from this set.

static AttributeSet get(LLVMContext &C, const AttrBuilder &B)

const ConstantRange & getRange() const

Returns the value of the range attribute.

AttrKind

This enumeration lists the attributes that can be associated with parameters, function results,...

bool isValid() const

Return true if the attribute is any kind of attribute.

LLVM Basic Block Representation.

iterator begin()

Instruction iterator methods.

iterator_range< const_phi_iterator > phis() const

Returns a range that iterates over the phis in the basic block.

InstListType::const_iterator getFirstNonPHIIt() const

Iterator returning form of getFirstNonPHI.

BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)

Split the basic block into two basic blocks at the specified instruction.

const Function * getParent() const

Return the enclosing method, or null if none.

SymbolTableList< BasicBlock >::iterator eraseFromParent()

Unlink 'this' from the containing function and delete it.

InstListType::iterator iterator

Instruction iterators...

const Instruction * getTerminator() const LLVM_READONLY

Returns the terminator instruction if the block is well formed or null if the block is not well formed.

void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)

Transfer all instructions from FromBB to this basic block at ToIt.

void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)

Update PHI nodes in this BasicBlock before removal of predecessor Pred.

BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...

void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)

void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)

Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...

BlockFrequency getBlockFreq(const BasicBlock *BB) const

getBlockFreq - Return block frequency.

Conditional or Unconditional Branch instruction.

static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)

Base class for all callable instructions (InvokeInst and CallInst); holds everything related to calling a function.

void setCallingConv(CallingConv::ID CC)

MaybeAlign getRetAlign() const

Extract the alignment of the return value.

void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const

Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.

OperandBundleUse getOperandBundleAt(unsigned Index) const

Return the operand bundle at a specific index.

std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const

Return an operand bundle by name, if present.

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.

void removeRetAttrs(const AttributeMask &AttrsToRemove)

Removes the attributes from the return value.

bool hasRetAttr(Attribute::AttrKind Kind) const

Determine whether the return value has the given attribute.

unsigned getNumOperandBundles() const

Return the number of operand bundles associated with this User.

CallingConv::ID getCallingConv() const

bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Get the attribute of a given kind from a given arg.

bool isByValArgument(unsigned ArgNo) const

Determine whether this argument is passed by value.

static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)

Create a clone of CB with operand bundle OB added.

AttributeSet getRetAttributes() const

Return the return attributes for this call.

Type * getParamByValType(unsigned ArgNo) const

Extract the byval type for a call or parameter.

Value * getCalledOperand() const

void setAttributes(AttributeList A)

Set the attributes for this call.

std::optional< ConstantRange > getRange() const

If this return value has a range attribute, return the value range of the argument.

bool doesNotThrow() const

Determine if the call cannot unwind.

Value * getArgOperand(unsigned i) const

uint64_t getRetDereferenceableBytes() const

Extract the number of dereferenceable bytes for a call or parameter (0=unknown).

bool isConvergent() const

Determine if the invoke is convergent.

FunctionType * getFunctionType() const

static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)

Create a clone of CB with a different set of operand bundles and insert it before InsertPt.

uint64_t getRetDereferenceableOrNullBytes() const

Extract the number of dereferenceable_or_null bytes for a call (0=unknown).

iterator_range< User::op_iterator > args()

Iteration adapter for range-for loops.

unsigned arg_size() const

AttributeList getAttributes() const

Return the attributes for this call.

bool hasOperandBundles() const

Return true if this User has any operand bundles.

Function * getCaller()

Helper to get the caller (the parent function).

This class represents a function call, abstracting a target machine's calling convention.

void setTailCallKind(TailCallKind TCK)

TailCallKind getTailCallKind() const

static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

bool isMustTailCall() const

static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)

This is the shared class of boolean and integer constants.

This class represents a range of values.

ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const

Return the range that results from the intersection of this range with another range.

static ConstantTokenNone * get(LLVMContext &Context)

Return the ConstantTokenNone.

This is an important base class in LLVM.

const Constant * stripPointerCasts() const

static InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)

Get the instruction instrumenting a BB, or nullptr if not present.

static InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)

Get the instruction instrumenting a callsite, or nullptr if that cannot be found.

This class represents an Operation in the Expression.

A parsed version of the target data layout string in and methods for querying it.

Base class for non-instruction debug metadata records that have positions within IR.

DILocation * get() const

Get the underlying DILocation.

MDNode * getScope() const

static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)

Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-at the new call site.

iterator find(const_arg_type_t< KeyT > Val)

size_type count(const_arg_type_t< KeyT > Val) const

Return 1 if the specified key is in the map, 0 otherwise.

std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)

Implements a dense probed hash-table based set.

void recalculate(ParentType &Func)

recalculate - compute a dominator tree for the given function

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

unsigned getNumParams() const

Return the number of fixed parameters this function type requires.

Class to represent profile counts.

uint64_t getCount() const

const BasicBlock & getEntryBlock() const

BasicBlockListType::iterator iterator

FunctionType * getFunctionType() const

Returns the FunctionType for me.

const BasicBlock & front() const

iterator_range< arg_iterator > args()

DISubprogram * getSubprogram() const

Get the attached subprogram.

bool hasGC() const

hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generation.

CallingConv::ID getCallingConv() const

getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.

bool hasPersonalityFn() const

Check whether this function has a personality function.

Constant * getPersonalityFn() const

Get the personality function associated with this function.

bool isIntrinsic() const

isIntrinsic - Returns true if the function's name starts with "llvm.".

MaybeAlign getParamAlign(unsigned ArgNo) const

LLVMContext & getContext() const

getContext - Return a reference to the LLVMContext associated with this function.

const std::string & getGC() const

std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const

Get the entry count for this function.

Type * getReturnType() const

Returns the type of the ret val.

void setCallingConv(CallingConv::ID CC)

bool onlyReadsMemory() const

Determine if the function does not access or only reads memory.

bool hasFnAttribute(Attribute::AttrKind Kind) const

Return true if the function has the attribute.

bool isDeclaration() const

Return true if the primary definition of this global value is outside of the current translation unit.

CallInst * CreateStackSave(const Twine &Name="")

Create a call to llvm.stacksave.

CallInst * CreateLifetimeStart(Value *Ptr, ConstantInt *Size=nullptr)

Create a lifetime.start intrinsic.

CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)

Create an assume intrinsic call that represents an alignment assumption on the provided pointer.

ReturnInst * CreateRet(Value *V)

Create a 'ret ' instruction.

ConstantInt * getInt64(uint64_t C)

Get a constant 64-bit value.

CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")

Create a call to intrinsic ID with Args, mangled using Types.

Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")

ReturnInst * CreateRetVoid()

Create a 'ret void' instruction.

CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)

CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)

Create a lifetime.end intrinsic.

CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")

Create a call to llvm.stackrestore.

void SetInsertPoint(BasicBlock *TheBB)

This specifies that created instructions should be appended to the end of the specified block.

CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)

Create and insert a memcpy between the specified pointers.

Instruction * CreateNoAliasScopeDeclaration(Value *Scope)

Create a llvm.experimental.noalias.scope.decl intrinsic call.

This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.

This class captures the data input to the InlineFunction call, and records the auxiliary results prod...

bool UpdateProfile

Update profile for callee as well as cloned version.

function_ref< AssumptionCache &(Function &)> GetAssumptionCache

If non-null, InlineFunction will update the callgraph to reflect the changes it makes.

BlockFrequencyInfo * CalleeBFI

SmallVector< AllocaInst *, 4 > StaticAllocas

InlineFunction fills this in with all static allocas that get copied into the caller.

BlockFrequencyInfo * CallerBFI

SmallVector< CallBase *, 8 > InlinedCallSites

All of the new call sites inlined into the caller.

InlineResult is basically true or false.

static InlineResult success()

static InlineResult failure(const char *Reason)

This represents the llvm.instrprof.callsite intrinsic.

This represents the llvm.instrprof.increment intrinsic.

void insertBefore(Instruction *InsertPos)

Insert an unlinked instruction into a basic block immediately before the specified instruction.

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

bool hasMetadata() const

Return true if this instruction has any metadata attached to it.

InstListType::iterator eraseFromParent()

This method unlinks 'this' from the containing basic block and deletes it.

const Function * getFunction() const

Return the function this instruction belongs to.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

void setMetadata(unsigned KindID, MDNode *Node)

Set the metadata of the specified kind to the specified node.

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

void setDebugLoc(DebugLoc Loc)

Set the debug location information for this instruction.

const DataLayout & getDataLayout() const

Get the data layout of the module this instruction belongs to.

A wrapper class for inspecting calls to intrinsic functions.

static bool mayLowerToFunctionCall(Intrinsic::ID IID)

Check if the intrinsic might lower into a regular function call in the course of IR transformations.

This is an important class for using LLVM in a threaded context.

@ OB_clang_arc_attachedcall

The landingpad instruction holds all of the information necessary to generate correct exception handling.

bool isCleanup() const

Return 'true' if this landingpad instruction is a cleanup.

unsigned getNumClauses() const

Get the number of clauses for this landing pad.

Constant * getClause(unsigned Idx) const

Get the value of the clause at index Idx.

An instruction for reading from memory.

MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())

Return metadata appropriate for an alias scope root node.

MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())

Return metadata appropriate for an alias scope domain node.

void replaceAllUsesWith(Metadata *MD)

RAUW a temporary.

static MDNode * concatenate(MDNode *A, MDNode *B)

Methods for metadata merging.

ArrayRef< MDOperand > operands() const

op_iterator op_end() const

static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)

unsigned getNumOperands() const

Return number of MDNode operands.

op_iterator op_begin() const

LLVMContext & getContext() const

static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)

Return a temporary node.

bool onlyAccessesInaccessibleMem() const

Whether this function only (at most) accesses inaccessible memory.

bool onlyAccessesArgPointees() const

Whether this function only (at most) accesses argument memory.

A Module instance is used to store all the information related to an LLVM module.

A container for an operand bundle being viewed as a set of values rather than a set of uses.

The instrumented contextual profile, produced by the CtxProfAnalysis.

void update(Visitor, const Function &F)

uint32_t getNumCounters(const Function &F) const

uint32_t allocateNextCounterIndex(const Function &F)

uint32_t getNumCallsites(const Function &F) const

uint32_t allocateNextCallsiteIndex(const Function &F)

A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.

void addIncoming(Value *V, BasicBlock *BB)

Add an incoming value to the end of the PHI list.

static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have.

static PoisonValue * get(Type *T)

Static factory methods - Return an 'poison' object of the specified type.

Analysis providing profile information.

std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const

Returns the profile count for CallInst.

Resume the propagation of an exception.

Return a value (possibly void), from a function.

A vector that has set insertion semantics.

A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

bool contains(ConstPtrType Ptr) const

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

reference emplace_back(ArgTypes &&... Args)

void reserve(size_type N)

void append(ItTy in_start, ItTy in_end)

Add the specified range to the end of the SmallVector.

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

The instances of the Type class are immutable: once they are created, they are never changed.

unsigned getPointerAddressSpace() const

Get the address space of this pointer or pointer vector type.

LLVMContext & getContext() const

Return the LLVMContext in which this type was uniqued.

static IntegerType * getInt64Ty(LLVMContext &C)

bool isVoidTy() const

Return true if this is 'void'.

void setOperand(unsigned i, Value *Val)

Value * getOperand(unsigned i) const

This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...

ValueT lookup(const KeyT &Val) const

lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.

size_type count(const KeyT &Val) const

Return 1 if the specified key is in the map, 0 otherwise.

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

void replaceAllUsesWith(Value *V)

Change all uses of this to point to a new Value.

iterator_range< user_iterator > users()

LLVMContext & getContext() const

All values hold a context through their type.

StringRef getName() const

Return a constant reference to the value's name.

void takeName(Value *V)

Transfer the name from V to this value.

std::pair< iterator, bool > insert(const ValueT &V)

constexpr ScalarTy getFixedValue() const

constexpr bool isScalable() const

Returns whether the quantity is scaled by a runtime quantity (vscale).

const ParentTy * getParent() const

self_iterator getIterator()

Class to build a trie of call stack contexts for a particular profiled allocation call,...

Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...

This provides a very simple, boring adaptor for a begin and end iterator into a range type.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)

Which attributes cannot be applied to a type.

void mergeAttributesForInlining(Function &Caller, const Function &Callee)

Merge caller's and callee's attributes.

Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})

Look up the Function declaration of the intrinsic id in the Module M.

bool match(Val *V, const Pattern &P)

match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()

Match an arbitrary immediate Constant and ignore it.

AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)

Return a range of dbg.assign intrinsics which use \ID as an operand.

void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)

Track assignments to Vars between Start and End.

void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)

Replace DIAssignID uses and attachments with IDs from Map.

SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)

initializer< Ty > init(const Ty &Val)

MDNode * getMIBStackNode(const MDNode *MIB)

Returns the stack node from an MIB metadata node.

ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)

This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedcall.

ARCInstKind

Equivalence classes of instructions in the ARC Model.

std::optional< Function * > getAttachedARCFunction(const CallBase *CB)

This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the ARC runtime function.

bool isRetainOrClaimRV(ARCInstKind Kind)

Check whether the function is retainRV/unsafeClaimRV.

const Value * GetRCIdentityRoot(const Value *V)

The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equivalent to retaining or releasing V.

bool hasAttachedCallOpBundle(const CallBase *CB)

This is an optimization pass for GlobalISel generic memory operations.

UnaryFunction for_each(R &&Range, UnaryFunction F)

Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)

Get the size of a range.

BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)

Convert the CallInst to InvokeInst with the specified unwind edge basic block.

auto successors(const MachineBasicBlock *BB)

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)

PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing function (which is required to exist) before the given instruction.

void append_range(Container &C, Range &&R)

Wrapper function to append range R to container C.

const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)

This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....

iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)

Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.

bool isScopedEHPersonality(EHPersonality Pers)

Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.

Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)

See if we can compute a simplified version of this instruction.

Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to infer an alignment for the specified pointer.

Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to ensure that the alignment of V is at least PrefAlign bytes.

void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)

This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on the fly.

EHPersonality classifyEHPersonality(const Value *Pers)

See if the given exception handling personality function is one that we understand.

unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)

Insert an unreachable instruction before the specified instruction, making it and the rest of the code in the block dead.

raw_fd_ostream & errs()

This returns a reference to a raw_ostream for standard error.

bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)

Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.

void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)

Updates profile information by adjusting the entry count by adding EntryDelta, then scaling callsite information by the new count divided by the old count.

bool isAssignmentTrackingEnabled(const Module &M)

Return true if assignment tracking is enabled for module M.

MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)

Compute the union of two access-group lists.

InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)

This function inlines the called function into the basic block of the caller.

bool isAsynchronousEHPersonality(EHPersonality Pers)

Returns true if this personality function catches asynchronous exceptions.

bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)

Return true if this function can prove that the instruction I will always transfer execution to one o...

bool isEscapeSource(const Value *V)

Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalObject.

auto count_if(R &&Range, UnaryPredicate P)

Wrapper function around std::count_if to count the number of times an element satisfying a given pred...

void erase_if(Container &C, UnaryPredicate P)

Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...

void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)

This method is similar to getUnderlyingObject except that it can look through phi and select instructions.

bool pred_empty(const BasicBlock *BB)

void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)

Update the debug locations contained within the MD_loop metadata attached to the instruction I,...

bool isIdentifiedObject(const Value *V)

Return true if this pointer refers to a distinct and identifiable object.

void scaleProfData(Instruction &I, uint64_t S, uint64_t T)

Scaling the profile data attached to 'I' using the ratio of S/T.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This struct is a compact representation of a valid (non-zero power of two) alignment.

This struct can be used to capture information about code being cloned, while it is being cloned.

bool ContainsDynamicAllocas

This is set to true if the cloned code contains a 'dynamic' alloca.

bool isSimplified(const Value *From, const Value *To) const

bool ContainsCalls

This is set to true if the cloned code contains a normal call instruction.

bool ContainsMemProfMetadata

This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned code.

std::vector< WeakTrackingVH > OperandBundleCallSites

All cloned call sites that have operand bundles attached are appended to this vector.

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

Align valueOrOne() const

For convenience, returns a valid alignment or 1 if undefined.

static Instruction * tryGetVTableInstruction(CallBase *CB)

Helper struct for trackAssignments, below.