LLVM: lib/Transforms/Utils/InlineFunction.cpp Source File

1//===- InlineFunction.cpp - Code to perform function inlining ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements inlining of a function into a call site, resolving
10// parameter values and the return value as appropriate.
11//
12//===----------------------------------------------------------------------===//
13

75#include <algorithm>
76#include <cassert>
77#include <cstdint>
78#include <iterator>
79#include <limits>
80#include <memory>
81#include <optional>
82#include <string>
83#include <utility>
84#include <vector>

85

86#define DEBUG_TYPE "inline-function"

87

88using namespace llvm;

92static cl::opt<bool>
93 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
94 cl::Hidden,
95 cl::desc("Convert noalias attributes to metadata during inlining."));
96
97static cl::opt<bool>
98 UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
99 cl::init(true),
100 cl::desc("Use the llvm.experimental.noalias.scope.decl "
101 "intrinsic during inlining."));
102
106static cl::opt<bool> PreserveAlignmentAssumptions(
107 "preserve-alignment-assumptions-during-inlining", cl::init(false),
108 cl::Hidden,
109 cl::desc("Convert align attributes to assumptions during inlining."));
110
111static cl::opt<unsigned> InlinerAttributeWindow(
112 "max-inst-checked-for-throw-during-inlining", cl::Hidden,
113 cl::desc("the maximum number of instructions analyzed for may throw during "
114 "attribute inference in inlined body"),
115 cl::init(4));
116

117namespace {

118

119

120 class LandingPadInliningInfo {

121

123

124

125 BasicBlock *InnerResumeDest = nullptr;

126

127

129

130

131 PHINode *InnerEHValuesPHI = nullptr;

132

134

135 public:

136 LandingPadInliningInfo(InvokeInst *II)
137 : OuterResumeDest(II->getUnwindDest()) {

138

139

140

141 BasicBlock *InvokeBB = II->getParent();
142 BasicBlock::iterator I = OuterResumeDest->begin();
143 for (; isa<PHINode>(I); ++I) {
144 // Save the value to use for this edge.
145 PHINode *PHI = cast<PHINode>(I);
146 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));

147 }

148

149 CallerLPad = cast<LandingPadInst>(I);

150 }

151

152

153

154 BasicBlock *getOuterResumeDest() const {

155 return OuterResumeDest;

156 }

157

159

160 LandingPadInst *getLandingPadInst() const { return CallerLPad; }

161

162

163

164

165

166

169

170

171

172 void addIncomingPHIValuesFor(BasicBlock *BB) const {

173 addIncomingPHIValuesForInto(BB, OuterResumeDest);

174 }

175

176 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
177 BasicBlock::iterator I = dest->begin();
178 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
179 PHINode *phi = cast<PHINode>(I);
180 phi->addIncoming(UnwindDestPHIValues[i], src);

181 }

182 }

183 };

184}

185

188 while (I) {

189 if (auto *IntrinsicCall = dyn_cast<ConvergenceControlInst>(I)) {

190 if (IntrinsicCall->isEntry()) {

191 return IntrinsicCall;

192 }

193 }

194 I = I->getNextNode();

195 }

196 return nullptr;

197}

198

199

200BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {

201 if (InnerResumeDest) return InnerResumeDest;

202

203

204 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
205 InnerResumeDest =
206 OuterResumeDest->splitBasicBlock(SplitPoint,
207 OuterResumeDest->getName() + ".body");

208

209

210 const unsigned PHICapacity = 2;

211

212

213 BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
214 BasicBlock::iterator I = OuterResumeDest->begin();
215 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
216 PHINode *OuterPHI = cast<PHINode>(I);
217 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
218 OuterPHI->getName() + ".lpad-body");
219 InnerPHI->insertBefore(InsertPoint);
220 OuterPHI->replaceAllUsesWith(InnerPHI);
221 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);

222 }

223

224

225 InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
226 "eh.lpad-body");
227 InnerEHValuesPHI->insertBefore(InsertPoint);
228 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
229 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

230

231

232 return InnerResumeDest;

233}

234

235

236

237

238

239void LandingPadInliningInfo::forwardResume(
240 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
241 BasicBlock *Dest = getInnerResumeDest();
242 BasicBlock *Src = RI->getParent();
243
244 BranchInst::Create(Dest, Src);
245
246 // Update the PHIs in the destination. They were inserted in an order
247 // which makes this work.
248 addIncomingPHIValuesForInto(Src, Dest);
249
250 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
251 RI->eraseFromParent();
252}

253

254

255static Value *getParentPad(Value *EHPad) {
256 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
257 return FPI->getParentPad();
258 return cast<CatchSwitchInst>(EHPad)->getParentPad();

259}

260

262

263

264

268

269 while (!Worklist.empty()) {

271

272

273

274

276 Value *UnwindDestToken = nullptr;

277 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {

278 if (CatchSwitch->hasUnwindDest()) {

279 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();

280 } else {

281

282

283

284

285

286

287 for (auto HI = CatchSwitch->handler_begin(),

288 HE = CatchSwitch->handler_end();

289 HI != HE && !UnwindDestToken; ++HI) {
290 BasicBlock *HandlerBlock = *HI;
291 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());

292 for (User *Child : CatchPad->users()) {

293

294

295

296

297 if (!isa<CatchSwitchInst>(Child) && !isa<CleanupPadInst>(Child))

298 continue;

299

300 Instruction *ChildPad = cast<Instruction>(Child);

301 auto Memo = MemoMap.find(ChildPad);

302 if (Memo == MemoMap.end()) {

303

305 continue;

306 }

307

308

309 Value *ChildUnwindDestToken = Memo->second;

310 if (!ChildUnwindDestToken)

311 continue;

312

313

314

315

316 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {

317 UnwindDestToken = ChildUnwindDestToken;

318 break;

319 }

321 }

322 }

323 }

324 } else {

325 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);

326 for (User *U : CleanupPad->users()) {

327 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {

328 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())

329 UnwindDestToken = RetUnwindDest->getFirstNonPHI();

330 else
331 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
332 break;

333 }

334 Value *ChildUnwindDestToken;

335 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {

336 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();

337 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {

338 Instruction *ChildPad = cast<Instruction>(U);

339 auto Memo = MemoMap.find(ChildPad);

340 if (Memo == MemoMap.end()) {

341

343 continue;

344 }

345

346

347 ChildUnwindDestToken = Memo->second;

348 if (!ChildUnwindDestToken)

349 continue;

350 } else {

351

352 continue;

353 }

354

355

356

357 if (isa<Instruction>(ChildUnwindDestToken) &&
358 getParentPad(ChildUnwindDestToken) == CleanupPad)

359 continue;

360 UnwindDestToken = ChildUnwindDestToken;

361 break;

362 }

363 }

364

365

366 if (!UnwindDestToken)

367 continue;

368

369

370

371

372

373 Value *UnwindParent;

374 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
375 UnwindParent = getParentPad(UnwindPad);
376 else
377 UnwindParent = nullptr;

378 bool ExitedOriginalPad = false;

379 for (Instruction *ExitedPad = CurrentPad;

380 ExitedPad && ExitedPad != UnwindParent;

381 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {

382

383 if (isa<CatchPadInst>(ExitedPad))

384 continue;

385 MemoMap[ExitedPad] = UnwindDestToken;

386 ExitedOriginalPad |= (ExitedPad == EHPad);

387 }

388

389 if (ExitedOriginalPad)

390 return UnwindDestToken;

391

392

393 }

394

395

396 return nullptr;

397}

398

399

400

401

402

403

404

405

406

407

408

409

410

411

412

413

414

415

418

419

420

421 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))

422 EHPad = CPI->getCatchSwitch();

423

424

425 auto Memo = MemoMap.find(EHPad);

426 if (Memo != MemoMap.end())

427 return Memo->second;

428

429

430 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
431 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));

432 if (UnwindDestToken)

433 return UnwindDestToken;

434

435

436

437

438

439

440 MemoMap[EHPad] = nullptr;

441#ifndef NDEBUG

442 SmallPtrSet<Instruction *, 4> TempMemos;
443 TempMemos.insert(EHPad);

444#endif

445 Instruction *LastUselessPad = EHPad;
446 Value *AncestorToken;
447 for (AncestorToken = getParentPad(EHPad);
448 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
449 AncestorToken = getParentPad(AncestorToken)) {

450

451 if (isa<CatchPadInst>(AncestorPad))

452 continue;

453

454

455

456

457

458

459

460 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);

461 auto AncestorMemo = MemoMap.find(AncestorPad);

462 if (AncestorMemo == MemoMap.end()) {
463 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
464 } else {

465 UnwindDestToken = AncestorMemo->second;

466 }

467 if (UnwindDestToken)

468 break;

469 LastUselessPad = AncestorPad;

470 MemoMap[LastUselessPad] = nullptr;

471#ifndef NDEBUG

472 TempMemos.insert(LastUselessPad);

473#endif

474 }

475

476

477

478

479

480

481

482

483

484

485

486

487

488

489 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
490 while (!Worklist.empty()) {
491 Instruction *UselessPad = Worklist.pop_back_val();
492 auto Memo = MemoMap.find(UselessPad);

493 if (Memo != MemoMap.end() && Memo->second) {

494

495

496

497

498

499

500

502 continue;

503 }

504

505

506

507

508

509

510

511

512

513 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));

514

515

516

517

518

519

520

521 MemoMap[UselessPad] = UnwindDestToken;

522 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
523 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
524 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
525 auto *CatchPad = HandlerBlock->getFirstNonPHI();
526 for (User *U : CatchPad->users()) {
527 assert(
528 (!isa<InvokeInst>(U) ||
529 (getParentPad(
530 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
531 CatchPad)) &&
532 "Expected useless pad");
533 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
534 Worklist.push_back(cast<Instruction>(U));

535 }

536 }

537 } else {

538 assert(isa<CleanupPadInst>(UselessPad));
539 for (User *U : UselessPad->users()) {
540 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
541 assert((!isa<InvokeInst>(U) ||
542 (getParentPad(
543 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
544 UselessPad)) &&
545 "Expected useless pad");
546 if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U))
547 Worklist.push_back(cast<Instruction>(U));

548 }

549 }

550 }

551

552 return UnwindDestToken;

553}

554

555

556

557

558

559

564

565

566 CallInst *CI = dyn_cast<CallInst>(&I);

567

569 continue;

570

571

572

573

574

575

576

578 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||

579 F->getIntrinsicID() == Intrinsic::experimental_guard)

580 continue;

581

583

584

585

586

587

588

589

590 auto *FuncletPad = cast<FuncletPadInst>(FuncletBundle->Inputs[0]);

591 Value *UnwindDestToken =

592 getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
593 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))

594 continue;

595#ifndef NDEBUG

596 Value *MemoKey;
597 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))

598 MemoKey = CatchPad->getCatchSwitch();

599 else

600 MemoKey = FuncletPad;

601 assert(FuncletUnwindMap->count(MemoKey) &&

602 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&

603 "must get memoized to avoid confusing later searches");

604#endif

605 }

606

608 return BB;

609 }

610 return nullptr;

611}

612

613

614

615

616

617

618

621 BasicBlock *InvokeDest = II->getUnwindDest();

622

624

625

626

627

628 LandingPadInliningInfo Invoke(II);

629

630

633 I != E; ++I)

634 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))

635 InlinedLPads.insert(II->getLandingPadInst());

636

637

638

639 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();

642 InlinedLPad->reserveClauses(OuterNum);

643 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)

644 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));

646 InlinedLPad->setCleanup(true);

647 }

648

650 BB != E; ++BB) {

653 &*BB, Invoke.getOuterResumeDest()))

654

655

656 Invoke.addIncomingPHIValuesFor(NewBB);

657

658

659 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))

660 Invoke.forwardResume(RI, InlinedLPads);

661 }

662

663

664

665

666

668}

669

670

671

672

673

674

675

678 BasicBlock *UnwindDest = II->getUnwindDest();

680

682

683

684

685

689

690 UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));

691 }

692

693

694

697 for (Value *V : UnwindDestPHIValues) {

699 PHI->addIncoming(V, Src);

700 ++I;

701 }

702 };

703

704

705

708 BB != E; ++BB) {

709 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {

710 if (CRI->unwindsToCaller()) {

711 auto *CleanupPad = CRI->getCleanupPad();

713 CRI->eraseFromParent();

715

716

717

718

719 assert(!FuncletUnwindMap.count(CleanupPad) ||

720 isa(FuncletUnwindMap[CleanupPad]));

721 FuncletUnwindMap[CleanupPad] =

723 }

724 }

725

727 if (I->isEHPad())

728 continue;

729

731 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {

732 if (CatchSwitch->unwindsToCaller()) {

733 Value *UnwindDestToken;

734 if (auto *ParentPad =

735 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {

736

737

738

739

740

741

742

743

745 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))

746 continue;

747 } else {

748

749

750

751

752

753

754

756 }

758 CatchSwitch->getParentPad(), UnwindDest,

759 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),

760 CatchSwitch->getIterator());

761 for (BasicBlock *PadBB : CatchSwitch->handlers())

762 NewCatchSwitch->addHandler(PadBB);

763

764

765

766

767 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;

768 Replacement = NewCatchSwitch;

769 }

770 } else if (!isa(I)) {

772 }

773

774 if (Replacement) {

776 I->replaceAllUsesWith(Replacement);

777 I->eraseFromParent();

779 }

780 }

781

784 E = Caller->end();

785 BB != E; ++BB)

787 &*BB, UnwindDest, &FuncletUnwindMap))

788

789

791

792

793

794

795

797}

798

800 MDNode *CallsiteStackContext) {

803

804

805

806 for (auto MIBStackIter = MIBStackContext->op_begin(),

807 CallsiteStackIter = CallsiteStackContext->op_begin();

808 MIBStackIter != MIBStackContext->op_end() &&

809 CallsiteStackIter != CallsiteStackContext->op_end();

810 MIBStackIter++, CallsiteStackIter++) {

811 auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
812 auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);

813 assert(Val1 && Val2);

814 if (Val1->getZExtValue() != Val2->getZExtValue())

815 return false;

816 }

817 return true;

818}

819

821 Call->setMetadata(LLVMContext::MD_memprof, nullptr);

822}

823

825 Call->setMetadata(LLVMContext::MD_callsite, nullptr);

826}

827

829 const std::vector<Metadata *> &MIBList) {

830 assert(!MIBList.empty());

831

832

835 for (Metadata *MIB : MIBList)

836 CallStack.addCallStack(cast<MDNode>(MIB));

837 bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);

838 assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));

839 if (!MemprofMDAttached)

840

842}

843

844

845

846

849 MDNode *InlinedCallsiteMD) {

850 MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);

851 MDNode *ClonedCallsiteMD = nullptr;

852

853

854 if (OrigCallsiteMD) {

855

856

857

858 ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);

859 ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);

860 }

861

862

863 MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);

864 if (!OrigMemProfMD)

865 return;

866

867

868 assert(OrigCallsiteMD);

869

870

871 std::vector<Metadata *> NewMIBList;

872

873

874

875

876 for (auto &MIBOp : OrigMemProfMD->operands()) {

877 MDNode *MIB = dyn_cast<MDNode>(MIBOp);

878

881

883

884 NewMIBList.push_back(MIB);

885 }

886 if (NewMIBList.empty()) {

889 return;

890 }

891 if (NewMIBList.size() < OrigMemProfMD->getNumOperands())

893}

894

895

896

897

898

899

900static void

902 bool ContainsMemProfMetadata,

905

906

907 if (!CallsiteMD && !ContainsMemProfMetadata)

908 return;

909

910

911 for (const auto &Entry : VMap) {

912

913

914 auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
915 auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);

916 if (!OrigCall || !ClonedCall)

917 continue;

918

919

920

921 if (!CallsiteMD) {

924 continue;

925 }

927 }

928}

929

930

931

932

935 MDNode *MemParallelLoopAccess =

936 CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);

937 MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);

938 MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);

940 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)

941 return;

942

945

946 if (I.mayReadOrWriteMemory())

947 continue;

948

949 if (MemParallelLoopAccess) {

950

952 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),

953 MemParallelLoopAccess);

954 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,

955 MemParallelLoopAccess);

956 }

957

958 if (AccessGroup)

960 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

961

962 if (AliasScope)

964 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

965

966 if (NoAlias)

968 I.getMetadata(LLVMContext::MD_noalias), NoAlias));

969 }

970 }

971}

972

973

978 if (!I)

979 continue;

980

982 continue;

983

984

985 auto *CalledFn =

986 dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());

987 if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&

989 continue;

990

992 I->getOperandBundlesAsDefs(OpBundles);

993 OpBundles.emplace_back("funclet", CallSiteEHPad);

994

997 I->replaceAllUsesWith(NewInst);

998 I->eraseFromParent();

999 }

1000}

1001

1002namespace {

1003

1004

1005

1006

1007class ScopedAliasMetadataDeepCloner {

1010 MetadataMap MDMap;

1011 void addRecursiveMetadataUses();

1012

1013public:

1014 ScopedAliasMetadataDeepCloner(const Function *F);

1015

1016

1017

1018 void clone();

1019

1020

1021

1023};

1024}

1025

1026ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(

1030 if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))

1031 MD.insert(M);

1032 if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))

1033 MD.insert(M);

1034

1035

1036 if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))

1037 MD.insert(Decl->getScopeList());

1038 }

1039 }

1040 addRecursiveMetadataUses();

1041}

1042

1043void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {

1044 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
1045 while (!Queue.empty()) {
1046 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
1047 for (const Metadata *Op : M->operands())
1048 if (const MDNode *OpMD = dyn_cast<MDNode>(Op))

1049 if (MD.insert(OpMD))

1050 Queue.push_back(OpMD);

1051 }

1052}

1053

1054void ScopedAliasMetadataDeepCloner::clone() {

1055 assert(MDMap.empty() && "clone() already called ?");

1056

1058 for (const MDNode *I : MD) {

1060 MDMap[I].reset(DummyNodes.back().get());

1061 }

1062

1063

1064

1065

1067 for (const MDNode *I : MD) {

1068 for (const Metadata *Op : I->operands()) {

1069 if (const MDNode *M = dyn_cast<MDNode>(Op))

1071 else

1073 }

1074

1076 MDTuple *TempM = cast(MDMap[I]);

1078

1081 }

1082}

1083

1086 if (MDMap.empty())

1087 return;

1088

1091

1092

1093 if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))

1094 if (MDNode *MNew = MDMap.lookup(M))

1095 I.setMetadata(LLVMContext::MD_alias_scope, MNew);

1096

1097 if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))

1098 if (MDNode *MNew = MDMap.lookup(M))

1099 I.setMetadata(LLVMContext::MD_noalias, MNew);

1100

1101 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))

1102 if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))

1103 Decl->setScopeList(MNew);

1104 }

1105 }

1106}

1107

1108

1109

1110

1111

1116 return;

1117

1120

1121 for (const Argument &Arg : CalledFunc->args())

1122 if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())

1124

1125 if (NoAliasArgs.empty())

1126 return;

1127

1128

1129

1132

1133

1134

1135

1136

1137

1138

1141

1142

1145 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {

1146 const Argument *A = NoAliasArgs[i];

1147

1148 std::string Name = std::string(CalledFunc->getName());

1149 if (A->hasName()) {

1150 Name += ": %";

1151 Name += A->getName();

1152 } else {

1153 Name += ": argument ";

1154 Name += utostr(i);

1155 }

1156

1157

1158

1159

1161 NewScopes.insert(std::make_pair(A, NewScope));

1162

1164

1165

1167 auto *NoAliasDecl =

1169

1170

1171 (void)NoAliasDecl;

1172 }

1173 }

1174

1175

1176

1178 VMI != VMIE; ++VMI) {

1179 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {

1180 if (!VMI->second)

1181 continue;

1182

1183 Instruction *NI = dyn_cast<Instruction>(VMI->second);

1184 if (!NI || InlinedFunctionInfo.isSimplified(I, NI))

1185 continue;

1186

1187 bool IsArgMemOnlyCall = false, IsFuncCall = false;

1189

1190 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
1191 PtrArgs.push_back(LI->getPointerOperand());
1192 else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
1193 PtrArgs.push_back(SI->getPointerOperand());
1194 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
1195 PtrArgs.push_back(VAAI->getPointerOperand());
1196 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
1197 PtrArgs.push_back(CXI->getPointerOperand());
1198 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
1199 PtrArgs.push_back(RMWI->getPointerOperand());
1200 else if (const auto *Call = dyn_cast<CallBase>(I)) {

1201

1202

1203

1204 if (Call->doesNotAccessMemory())

1205 continue;

1206

1207 IsFuncCall = true;

1208 if (CalleeAAR) {

1210

1211

1213 continue;

1214

1216 IsArgMemOnlyCall = true;

1217 }

1218

1219 for (Value *Arg : Call->args()) {

1220

1221

1222

1223 if (!Arg->getType()->isPointerTy())

1224 continue;

1225

1227 }

1228 }

1229

1230

1231

1232

1233

1234 if (PtrArgs.empty() && !IsFuncCall)

1235 continue;

1236

1237

1238

1239

1242

1243 for (const Value *V : PtrArgs) {

1246

1247 for (const Value *O : Objects)

1249 }

1250

1251

1252

1253 bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,

1254 UsesUnknownObject = false;

1255 for (const Value *V : ObjSet) {

1256

1257

1258

1259 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1260 isa<ConstantPointerNull>(V) ||
1261 isa<ConstantDataVector>(V) || isa<UndefValue>(V);

1262 if (IsNonPtrConst)

1263 continue;

1264

1265

1266

1267

1268 if (const Argument *A = dyn_cast<Argument>(V)) {

1269 if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))

1270 UsesAliasingPtr = true;

1271 } else {

1272 UsesAliasingPtr = true;

1273 }

1274

1276

1277

1278 RequiresNoCaptureBefore = true;

1280

1281

1282

1283

1284 UsesUnknownObject = true;

1285 }

1286 }

1287

1288

1289

1290 if (UsesUnknownObject)

1291 continue;

1292

1293

1294

1295 if (IsFuncCall && !IsArgMemOnlyCall)

1296 RequiresNoCaptureBefore = true;

1297

1298

1299

1300

1301

1302

1303

1304

1305

1306 for (const Argument *A : NoAliasArgs) {

1308 continue;

1309

1310

1311

1312

1313

1314 if (!RequiresNoCaptureBefore ||

1316 false, I, &DT))

1318 }

1319

1321 NI->setMetadata(LLVMContext::MD_noalias,

1323 NI->getMetadata(LLVMContext::MD_noalias),

1325

1326

1327

1328

1329

1330

1331

1332

1333

1334

1335

1336 bool CanAddScopes = !UsesAliasingPtr;

1337 if (CanAddScopes && IsFuncCall)

1338 CanAddScopes = IsArgMemOnlyCall;

1339

1340 if (CanAddScopes)

1341 for (const Argument *A : NoAliasArgs) {

1342 if (ObjSet.count(A))

1343 Scopes.push_back(NewScopes[A]);

1344 }

1345

1346 if (!Scopes.empty())

1348 LLVMContext::MD_alias_scope,

1351 }

1352 }

1353}

1354

1356static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin,
1357 ReturnInst *End) {
1358 assert(Begin->getParent() == End->getParent() &&
1359 "Expected to be in same basic block!");
1360 auto BeginIt = Begin->getIterator();
1361 assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");
1362 return !llvm::isGuaranteedToTransferExecutionToSuccessor(
1363 ++BeginIt, End->getIterator(), InlinerAttributeWindow + 1);
1364}

1365

1366

1367

1372 auto &Context = CalledFunction->getContext();

1373

1374

1376 bool HasAttrToPropagate = false;

1377

1378

1379

1380

1381

1382

1384 Attribute::Dereferenceable, Attribute::DereferenceableOrNull,

1385 Attribute::NonNull, Attribute::Alignment, Attribute::Range};

1386

1387 for (unsigned I = 0, E = CB.arg_size(); I < E; ++I) {

1390

1391

1393 ValidObjParamAttrs.back().addAttribute(Attribute::ReadNone);

1395 ValidObjParamAttrs.back().addAttribute(Attribute::ReadOnly);

1396

1400 ValidExactParamAttrs.back().addAttribute(Attr);

1401 }

1402

1403 HasAttrToPropagate |= ValidObjParamAttrs.back().hasAttributes();

1404 HasAttrToPropagate |= ValidExactParamAttrs.back().hasAttributes();

1405 }

1406

1407

1408 if (!HasAttrToPropagate)

1409 return;

1410

1411 for (BasicBlock &BB : *CalledFunction) {

1413 const auto *InnerCB = dyn_cast<CallBase>(&Ins);

1414 if (!InnerCB)

1415 continue;

1416 auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.lookup(InnerCB));

1417 if (!NewInnerCB)

1418 continue;

1419

1420

1421 if (InlinedFunctionInfo.isSimplified(InnerCB, NewInnerCB))

1422 continue;

1423

1425 for (unsigned I = 0, E = InnerCB->arg_size(); I < E; ++I) {

1426

1427

1428

1429

1430 if (NewInnerCB->paramHasAttr(I, Attribute::ByVal))

1431 continue;

1432

1433

1434 if (match(NewInnerCB->getArgOperand(I),

1436 continue;

1437

1438

1439 const Argument *Arg = dyn_cast<Argument>(InnerCB->getArgOperand(I));

1440 unsigned ArgNo;

1441 if (Arg) {

1443

1444

1445

1446

1447

1449 Context, AttributeSet::get(Context, ValidExactParamAttrs[ArgNo])};

1450 if (AL.getParamDereferenceableBytes(I) >

1451 NewAB.getDereferenceableBytes())

1453 if (AL.getParamDereferenceableOrNullBytes(I) >

1454 NewAB.getDereferenceableOrNullBytes())

1455 NewAB.removeAttribute(Attribute::DereferenceableOrNull);

1456 if (AL.getParamAlignment(I).valueOrOne() >

1457 NewAB.getAlignment().valueOrOne())

1459 if (auto ExistingRange = AL.getParamRange(I)) {

1460 if (auto NewRange = NewAB.getRange()) {

1463 NewAB.removeAttribute(Attribute::Range);

1464 NewAB.addRangeAttr(CombinedRange);

1465 }

1466 }

1467 AL = AL.addParamAttributes(Context, I, NewAB);

1468 } else if (NewInnerCB->getArgOperand(I)->getType()->isPointerTy()) {

1469

1470 const Value *UnderlyingV =

1472 Arg = dyn_cast<Argument>(UnderlyingV);

1473 if (!Arg)

1474 continue;

1476 } else {

1477 continue;

1478 }

1479

1480

1481 AL = AL.addParamAttributes(Context, I, ValidObjParamAttrs[ArgNo]);

1482

1483

1484

1485

1486

1487

1488 if (AL.hasParamAttr(I, Attribute::ReadOnly) &&

1489 AL.hasParamAttr(I, Attribute::WriteOnly))

1490 AL = AL.addParamAttribute(Context, I, Attribute::ReadNone);

1491

1492

1493 if (AL.hasParamAttr(I, Attribute::ReadNone)) {

1494 AL = AL.removeParamAttribute(Context, I, Attribute::ReadOnly);

1495 AL = AL.removeParamAttribute(Context, I, Attribute::WriteOnly);

1496 }

1497

1498

1499 if (AL.hasParamAttr(I, Attribute::ReadOnly) ||

1500 AL.hasParamAttr(I, Attribute::ReadNone))

1501 AL = AL.removeParamAttribute(Context, I, Attribute::Writable);

1502 }

1503 NewInnerCB->setAttributes(AL);

1504 }

1505 }

1506}

1507

1508

1509

1510

1511

1512

1513

1520 if (CB.hasRetAttr(Attribute::NoAlias))

1522 if (CB.hasRetAttr(Attribute::NoUndef))

1524 return Valid;

1525}

1526

1527

1528

1531 if (CB.hasRetAttr(Attribute::NonNull))

1533 if (CB.hasRetAttr(Attribute::Alignment))

1535 if (std::optional Range = CB.getRange())

1537 return Valid;

1538}

1539

1545 return;

1547 auto &Context = CalledFunction->getContext();

1548

1549 for (auto &BB : *CalledFunction) {

1550 auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1551 if (!RI || !isa<CallBase>(RI->getOperand(0)))
1552 continue;
1553 auto *RetVal = cast<CallBase>(RI->getOperand(0));

1554

1555

1556

1557 auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));

1558 if (!NewRetVal)

1559 continue;

1560

1561

1562

1563 if (InlinedFunctionInfo.isSimplified(RetVal, NewRetVal))

1564 continue;

1565

1566

1567

1568

1569

1570

1571

1572

1573

1574

1575

1576

1577

1578

1579

1580

1581

1582

1583 if (RI->getParent() != RetVal->getParent() ||

1585 continue;

1586

1587

1588

1589

1590

1591

1596 AL.getRetDereferenceableOrNullBytes())

1597 ValidUB.removeAttribute(Attribute::DereferenceableOrNull);

1598 AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);

1599

1600

1601

1602

1603

1604

1605

1606

1607

1608

1609

1610

1611

1612

1613

1614

1615

1616

1617

1618

1619

1620

1621

1622

1623

1624

1625

1626

1627

1628

1629

1630

1635 if (CBRange.isValid()) {

1636 Attribute NewRange = AL.getRetAttr(Attribute::Range);

1637 if (NewRange.isValid()) {

1640 }

1641 }

1642

1643

1644

1645

1646

1647

1648

1649

1650

1651

1652

1653 if (CB.hasRetAttr(Attribute::NoUndef) ||

1654 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))

1656 }

1657 NewRetVal->setAttributes(NewAL);

1658 }

1659}

1660

1661

1662

1665 return;

1666

1669

1670

1671

1673 bool DTCalculated = false;

1674

1677 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||

1678 Arg.hasNUses(0))

1679 continue;

1680 MaybeAlign Alignment = Arg.getParamAlign();

1681 if (!Alignment)

1682 continue;

1683

1684 if (!DTCalculated) {

1686 DTCalculated = true;

1687 }

1688

1689

1692 continue;

1693

1695 DL, ArgVal, Alignment->value());

1697 }

1698}

1699

1705

1707 Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

1708

1709

1710

1711

1714

1715

1716

1717

1720 CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));

1721}

1722

1723

1724

1731 const DataLayout &DL = Caller->getDataLayout();

1732

1733

1734

1735

1737

1738

1739

1740 if (ByValAlignment.valueOrOne() == 1)

1741 return Arg;

1742

1745

1746

1747

1749 *ByValAlignment)

1750 return Arg;

1751

1752

1753

1754 }

1755

1756

1757 Align Alignment = DL.getPrefTypeAlign(ByValType);

1758

1759

1760

1761

1762 if (ByValAlignment)

1763 Alignment = std::max(Alignment, *ByValAlignment);

1764

1767 nullptr, Alignment, Arg->getName());

1768 NewAlloca->insertBefore(Caller->begin()->begin());

1770

1771

1772

1773 return NewAlloca;

1774}

1775

1776

1778 for (User *U : V->users())

1779 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1780 if (II->isLifetimeStartOrEnd())

1781 return true;

1782 return false;

1783}

1784

1785

1786

1789 Type *Int8PtrTy =

1791 if (Ty == Int8PtrTy)

1793

1794

1796 if (U->getType() != Int8PtrTy) continue;

1797 if (U->stripPointerCasts() != AI) continue;
1798 if (isUsedByLifetimeMarker(U))
1799 return true;

1800 }

1801 return false;

1802}

1803

1804

1805

1806

1807static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1808 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1809}

1810

1811

1812

1817 return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),

1819}

1820

1821

1822

1824 Instruction *TheCall, bool CalleeHasDebugInfo) {

1826 if (!TheCallDL)

1827 return;

1828

1830 DILocation *InlinedAtNode = TheCallDL;

1831

1832

1833

1834 InlinedAtNode = DILocation::getDistinct(

1835 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),

1836 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

1837

1838

1839

1840

1842

1843

1844

1845 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

1846

1847

1849

1850

1851 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,

1853 if (auto *Loc = dyn_cast_or_null(MD))

1855 return MD;

1856 };

1858

1859 if (!NoInlineLineTables)

1863 I.setDebugLoc(IDL);

1864 return;

1865 }

1866

1867 if (CalleeHasDebugInfo && !NoInlineLineTables)

1868 return;

1869

1870

1871

1872

1873

1874

1875

1876

1877 if (auto *AI = dyn_cast<AllocaInst>(&I))
1878 if (allocaWouldBeStaticInEntry(AI))

1879 return;

1880

1881

1882

1883

1884 if (isa(I))

1885 return;

1886

1887 I.setDebugLoc(TheCallDL);

1888 };

1889

1890

1891 auto UpdateDVR = [&](DbgRecord *DVR) {

1892 assert(DVR->getDebugLoc() && "Debug Value must have debug loc");

1893 if (NoInlineLineTables) {

1894 DVR->setDebugLoc(TheCallDL);

1895 return;

1896 }

1900 DVR->getMarker()->getParent()->getContext(), IANodes);

1901 DVR->setDebugLoc(IDL);

1902 };

1903

1904

1905 for (; FI != Fn->end(); ++FI) {

1907 UpdateInst(I);

1908 for (DbgRecord &DVR : I.getDbgRecordRange()) {

1909 UpdateDVR(&DVR);

1910 }

1911 }

1912

1913

1914 if (NoInlineLineTables) {

1916 while (BI != FI->end()) {

1917 if (isa<DbgInfoIntrinsic>(BI)) {

1918 BI = BI->eraseFromParent();

1919 continue;

1920 } else {

1921 BI->dropDbgRecords();

1922 }

1923 ++BI;

1924 }

1925 }

1926 }

1927}

1928

1929#undef DEBUG_TYPE

1930#define DEBUG_TYPE "assignment-tracking"

1931

1936

1938 errs() << "# Finding caller local variables escaped by callee\n");

1939 for (const Value *Arg : CB.args()) {

1941 if (!Arg->getType()->isPointerTy()) {

1943 continue;

1944 }

1945

1946 const Instruction *I = dyn_cast<Instruction>(Arg);
1947 if (!I) {

1948 LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");

1949 continue;

1950 }

1951

1952

1953 assert(Arg->getType()->isPtrOrPtrVectorTy());

1954 APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);

1955 const AllocaInst *Base = dyn_cast<AllocaInst>(
1956 Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));
1957 if (!Base) {

1958 LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");

1959 continue;

1960 }

1961

1964

1966 continue;

1967

1968

1969 auto CollectAssignsForStorage = [&](auto *DbgAssign) {

1970

1971 if (DbgAssign->getDebugLoc().getInlinedAt())

1972 return;

1973 LLVM_DEBUG(errs() << " > DEF : " << *DbgAssign << "\n");

1975 };

1978 }

1979 return EscapedLocals;

1980}

1981

1985 << Start->getParent()->getName() << " from "

1989}

1990

1991

1992

1993

1996

1997

1998 for (auto BBI = Start; BBI != End; ++BBI) {

2001 }

2002}

2003#undef DEBUG_TYPE

2004#define DEBUG_TYPE "inline-function"

2005

2006

2007

2008

2009

2010

2011

2016 const BasicBlock &CalleeEntryBlock) {

2018 for (auto Entry : VMap) {

2019 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
2020 continue;
2021 auto *OrigBB = cast<BasicBlock>(Entry.first);
2022 auto *ClonedBB = cast<BasicBlock>(Entry.second);

2024 if (!ClonedBBs.insert(ClonedBB).second) {

2025

2026

2027

2029 if (NewFreq > Freq)

2030 Freq = NewFreq;

2031 }

2033 }

2034 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));

2036 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);

2037}

2038

2039

2045 return;

2046 auto CallSiteCount =

2047 PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;

2048 int64_t CallCount =

2049 std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());

2051}

2052

2054 Function *Callee, int64_t EntryDelta,

2056 auto CalleeCount = Callee->getEntryCount();

2057 if (!CalleeCount)

2058 return;

2059

2060 const uint64_t PriorEntryCount = CalleeCount->getCount();

2061

2062

2063

2064 const uint64_t NewEntryCount =

2065 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)

2066 ? 0

2067 : PriorEntryCount + EntryDelta;

2068

2069 auto updateVTableProfWeight = [](CallBase *CB, const uint64_t NewEntryCount,

2070 const uint64_t PriorEntryCount) {

2072 if (VPtr)

2073 scaleProfData(*VPtr, NewEntryCount, PriorEntryCount);

2074 };

2075

2076

2077 if (VMap) {

2078 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;

2079 for (auto Entry : *VMap) {

2080 if (isa<CallInst>(Entry.first))
2081 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second)) {

2082 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);

2083 updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);

2084 }

2085

2086 if (isa<InvokeInst>(Entry.first))
2087 if (auto *II = dyn_cast_or_null<InvokeInst>(Entry.second)) {

2088 II->updateProfWeight(CloneEntryCount, PriorEntryCount);

2089 updateVTableProfWeight(II, CloneEntryCount, PriorEntryCount);

2090 }

2091 }

2092 }

2093

2094 if (EntryDelta) {

2095 Callee->setEntryCount(NewEntryCount);

2096

2098

2099 if (!VMap || VMap->count(&BB))

2101 if (CallInst *CI = dyn_cast<CallInst>(&I)) {

2102 CI->updateProfWeight(NewEntryCount, PriorEntryCount);

2103 updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);

2104 }

2105 if (InvokeInst *II = dyn_cast<InvokeInst>(&I)) {

2106 II->updateProfWeight(NewEntryCount, PriorEntryCount);

2107 updateVTableProfWeight(II, NewEntryCount, PriorEntryCount);

2108 }

2109 }

2110 }

2111}

2112

2113

2114

2115

2116

2117

2118

2119

2120

2121

2122

2123

2124

2125

2126

2127

2128

2129

2130static void

2134 bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,

2135 IsUnsafeClaimRV = !IsRetainRV;

2136

2137 for (auto *RI : Returns) {

2139 bool InsertRetainCall = IsRetainRV;

2141

2142

2143

2147

2148 if (isa<CastInst>(I))

2149 continue;

2150

2151 if (auto *II = dyn_cast<IntrinsicInst>(&I)) {

2152 if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||

2153 II->hasNUses(0) ||

2155 break;

2156

2157

2158

2159

2160

2161

2162 if (IsUnsafeClaimRV) {

2164 Builder.CreateIntrinsic(Intrinsic::objc_release, {}, RetOpnd);

2165 }

2166 II->eraseFromParent();

2167 InsertRetainCall = false;

2168 break;

2169 }

2170

2171 auto *CI = dyn_cast<CallInst>(&I);

2172

2173 if (!CI)

2174 break;

2175

2178 break;

2179

2180

2181

2186 NewCall->copyMetadata(*CI);

2187 CI->replaceAllUsesWith(NewCall);

2188 CI->eraseFromParent();

2189 InsertRetainCall = false;

2190 break;

2191 }

2192

2193 if (InsertRetainCall) {

2194

2195

2196

2198 Builder.CreateIntrinsic(Intrinsic::objc_retain, {}, RetOpnd);

2199 }

2200 }

2201}

2202

2203

2204

2205

2206

2207

2208

2209

2210

2211

2212

2213

2214

2215

2216

2217

2218

2219

2220

2221

2222

2223

2224static const std::pair<std::vector<int64_t>, std::vector<int64_t>>

2228

2229

2230

2231

2232 std::vector<int64_t> CalleeCounterMap;

2233 std::vector<int64_t> CalleeCallsiteMap;

2234 CalleeCounterMap.resize(CalleeCounters, -1);

2235 CalleeCallsiteMap.resize(CalleeCallsites, -1);

2236

2237 auto RewriteInstrIfNeeded = [&](InstrProfIncrementInst &Ins) -> bool {
2238 if (Ins.getNameValue() == &Caller)

2239 return false;

2240 const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());

2241 if (CalleeCounterMap[OldID] == -1)

2243 const auto NewID = static_cast<uint32_t>(CalleeCounterMap[OldID]);

2244

2245 Ins.setNameValue(&Caller);

2246 Ins.setIndex(NewID);

2247 return true;

2248 };

2249

2250 auto RewriteCallsiteInsIfNeeded = [&](InstrProfCallsite &Ins) -> bool {

2251 if (Ins.getNameValue() == &Caller)

2252 return false;

2253 const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());

2254 if (CalleeCallsiteMap[OldID] == -1)

2256 const auto NewID = static_cast<uint32_t>(CalleeCallsiteMap[OldID]);

2257

2258 Ins.setNameValue(&Caller);

2259 Ins.setIndex(NewID);

2260 return true;

2261 };

2262

2263 std::deque<BasicBlock *> Worklist;

2265

2266

2267

2268

2269

2270

2271

2272

2273

2274

2275

2276

2277

2278

2279

2280 Worklist.push_back(StartBB);

2281 while (!Worklist.empty()) {

2282 auto *BB = Worklist.front();

2283 Worklist.pop_front();

2284 bool Changed = false;

2286 if (BBID) {

2287 Changed |= RewriteInstrIfNeeded(*BBID);

2288

2289

2290

2291 BBID->moveBefore(&*BB->getFirstInsertionPt());

2292 }

2294 if (auto *Inc = dyn_cast<InstrProfIncrementInst>(&I)) {
2295 if (isa<InstrProfIncrementInstStep>(Inc)) {

2296

2297

2298

2299

2300

2301

2302 if (isa<Constant>(Inc->getStep())) {
2303 assert(!Inc->getNextNode() || !isa<SelectInst>(Inc->getNextNode()));
2304 Inc->eraseFromParent();
2305 } else {
2306 assert(isa_and_nonnull<SelectInst>(Inc->getNextNode()));

2307 RewriteInstrIfNeeded(*Inc);

2308 }

2309 } else if (Inc != BBID) {

2310

2311

2312

2313

2314 Inc->eraseFromParent();

2315 Changed = true;

2316 }

2317 } else if (auto *CS = dyn_cast<InstrProfCallsite>(&I)) {

2318 Changed |= RewriteCallsiteInsIfNeeded(*CS);

2319 }

2320 }

2321 if (!BBID || Changed)

2323 if (Seen.insert(Succ).second)

2324 Worklist.push_back(Succ);

2325 }

2326

2328 llvm::all_of(CalleeCounterMap, [&](const auto &V) { return V != 0; }) &&

2329 "Counter index mapping should be either to -1 or to non-zero index, "

2330 "because the 0 "

2331 "index corresponds to the entry BB of the caller");

2333 llvm::all_of(CalleeCallsiteMap, [&](const auto &V) { return V != 0; }) &&

2334 "Callsite index mapping should be either to -1 or to non-zero index, "

2335 "because there should have been at least a callsite - the inlined one "

2336 "- which would have had a 0 index.");

2337

2338 return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};

2339}

2340

2341

2342

2343

2344

2345

2346

2347

2348

2349

2350

2351

2352

2353

2354

2357 bool MergeAttributes,

2359 bool InsertLifetime,

2360 Function *ForwardVarArgsTo) {

2361 if (!CtxProf)

2362 return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,

2363 ForwardVarArgsTo);

2364

2368

2369

2370

2371

2374 const auto CallsiteID =

2375 static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());

2376

2377 const auto NumCalleeCounters = CtxProf.getNumCounters(Callee);

2378 const auto NumCalleeCallsites = CtxProf.getNumCallsites(Callee);

2379

2380 auto Ret = InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,

2381 ForwardVarArgsTo);

2382 if (!Ret.isSuccess())

2383 return Ret;

2384

2385

2386

2387 CallsiteIDIns->eraseFromParent();

2388

2389

2390

2391

2392 const auto IndicesMaps = remapIndices(Caller, StartBB, CtxProf,

2393 NumCalleeCounters, NumCalleeCallsites);

2395

2398 const auto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;

2400 (Ctx.counters().size() +

2401 llvm::count_if(CalleeCounterMap, [](auto V) { return V != -1; }) ==

2402 NewCountersSize) &&

2403 "The caller's counters size should have grown by the number of new "

2404 "distinct counters inherited from the inlined callee.");

2405 Ctx.resizeCounters(NewCountersSize);

2406

2407

2408

2409 auto CSIt = Ctx.callsites().find(CallsiteID);

2410 if (CSIt == Ctx.callsites().end())

2411 return;

2412 auto CalleeCtxIt = CSIt->second.find(CalleeGUID);

2413

2414

2415 if (CalleeCtxIt == CSIt->second.end())

2416 return;

2417

2418

2419

2420 auto &CalleeCtx = CalleeCtxIt->second;

2421 assert(CalleeCtx.guid() == CalleeGUID);

2422

2423 for (auto I = 0U; I < CalleeCtx.counters().size(); ++I) {

2424 const int64_t NewIndex = CalleeCounterMap[I];

2425 if (NewIndex >= 0) {

2426 assert(NewIndex != 0 && "counter index mapping shouldn't happen to a 0 "

2427 "index, that's the caller's entry BB");

2428 Ctx.counters()[NewIndex] = CalleeCtx.counters()[I];

2429 }

2430 }

2431 for (auto &[I, OtherSet] : CalleeCtx.callsites()) {

2432 const int64_t NewCSIdx = CalleeCallsiteMap[I];

2433 if (NewCSIdx >= 0) {

2434 assert(NewCSIdx != 0 &&

2435 "callsite index mapping shouldn't happen to a 0 index, the "

2436 "caller must've had at least one callsite (with such an index)");

2437 Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));

2438 }

2439 }

2440

2441

2442

2443 auto Deleted = Ctx.callsites().erase(CallsiteID);

2446 };

2447 CtxProf.update(Updater, Caller);

2448 return Ret;

2449}

2450

2451

2452

2453

2454

2455

2456

2457

2458

2460 bool MergeAttributes,

2462 bool InsertLifetime,

2463 Function *ForwardVarArgsTo) {

2465

2466

2467 if (isa<CallBrInst>(CB))

2469

2470

2472

2474 if (!CalledFunc ||

2477

2478

2479

2480 Value *ConvergenceControlToken = nullptr;

2484 uint32_t Tag = OBUse.getTagID();

2485

2487 continue;

2488

2490 continue;

2492 continue;

2494 continue;

2496 ConvergenceControlToken = OBUse.Inputs[0].get();

2497 continue;

2498 }

2499

2501 }

2502 }

2503

2504

2505

2506

2507

2508

2509

2510

2511

2512

2514 if (!ConvergenceControlToken &&

2517 "convergent call needs convergencectrl operand");

2518 }

2519 }

2520

2521

2522

2524

2527

2528

2529

2530

2531

2532 if (CalledFunc->hasGC()) {

2533 if (!Caller->hasGC())

2534 Caller->setGC(CalledFunc->getGC());

2535 else if (CalledFunc->getGC() != Caller->getGC())

2537 }

2538

2539

2540 Constant *CalledPersonality =
2541 CalledFunc->hasPersonalityFn()
2542 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
2543 : nullptr;

2544

2545

2546

2547

2548 Constant *CallerPersonality =

2549 Caller->hasPersonalityFn()

2550 ? Caller->getPersonalityFn()->stripPointerCasts()

2551 : nullptr;

2552 if (CalledPersonality) {

2553 if (!CallerPersonality)

2554 Caller->setPersonalityFn(CalledPersonality);

2555

2556

2557

2558

2559 else if (CalledPersonality != CallerPersonality)

2561 }

2562

2563

2564

2566 if (CallerPersonality) {

2569 std::optional ParentFunclet =

2571 if (ParentFunclet)

2572 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

2573

2574

2575

2576 if (CallSiteEHPad) {

2577 if (Personality == EHPersonality::MSVC_CXX) {

2578

2579

2580 if (isa<CleanupPadInst>(CallSiteEHPad)) {

2581

2582

2583 for (const BasicBlock &CalledBB : *CalledFunc) {

2584 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))

2586 }

2587 }

2589

2590

2591 for (const BasicBlock &CalledBB : *CalledFunc) {

2592 if (CalledBB.isEHPad())

2594 }

2595 }

2596 }

2597 }

2598 }

2599

2600

2601

2602 bool EHPadForCallUnwindsLocally = false;

2603 if (CallSiteEHPad && isa<CallInst>(CB)) {

2605 Value *CallSiteUnwindDestToken =

2607

2608 EHPadForCallUnwindsLocally =

2609 CallSiteUnwindDestToken &&

2610 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);

2611 }

2612

2613

2614

2616

2617

2618

2622

2623 {

2625 struct ByValInit {

2629 };

2630

2632

2633

2634

2635

2636

2637

2638

2639 ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());

2640

2641 auto &DL = Caller->getDataLayout();

2642

2643

2644

2646 unsigned ArgNo = 0;

2648 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {

2649 Value *ActualArg = *AI;

2650

2651

2652

2653

2654

2657 &CB, CalledFunc, IFI,

2659 if (ActualArg != *AI)

2662 }

2663

2664 VMap[&*I] = ActualArg;

2665 }

2666

2667

2668

2669

2670

2672

2675

2676

2678

2679

2680

2681

2682

2684 false, Returns, ".i",

2685 &InlinedFunctionInfo);

2686

2687 FirstNewBlock = LastBlock; ++FirstNewBlock;

2688

2689

2691 if (RVCallKind != objcarc::ARCInstKind::None)

2693

2694

2695

2696

2697

2700

2702 CalledFunc->front());

2703

2707 }

2708

2709

2710 for (ByValInit &Init : ByValInits)

2712 &*FirstNewBlock, IFI, CalledFunc);

2713

2714 std::optional ParentDeopt =

2716 if (ParentDeopt) {

2718

2720 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);

2721 if (!ICS)

2722 continue;

2723

2725

2727

2729 ++COBi) {

2732

2734 continue;

2735 }

2736

2737

2738

2739

2740

2741 std::vector<Value *> MergedDeoptArgs;

2742 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +

2743 ChildOB.Inputs.size());

2744

2747

2748 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));

2749 }

2750

2752

2753

2754

2756

2757 VH = nullptr;

2759 }

2760 }

2761

2762

2763

2764

2767

2769

2771

2772

2773

2775 }

2776

2777

2778 SAMetadataCloner.clone();

2779 SAMetadataCloner.remap(FirstNewBlock, Caller->end());

2780

2781

2783

2784

2785

2787

2788

2789

2791

2794

2795

2797

2798

2801 make_range(FirstNewBlock->getIterator(), Caller->end()))

2803 if (auto *II = dyn_cast<AssumeInst>(&I))
2804 IFI.GetAssumptionCache(*Caller).registerAssumption(II);

2805 }

2806

2807 if (ConvergenceControlToken) {

2809 if (IntrinsicCall) {

2812 }

2813 }

2814

2815

2816

2817

2818

2819 {

2822 E = FirstNewBlock->end(); I != E; ) {

2823 AllocaInst *AI = dyn_cast<AllocaInst>(I++);

2824 if (!AI) continue;

2825

2826

2827

2830 continue;

2831 }

2832

2834 continue;

2835

2836

2838

2839

2840

2841 while (isa<AllocaInst>(I) &&
2842 !cast<AllocaInst>(I)->use_empty() &&
2843 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {

2845 ++I;

2846 }

2847

2848

2849

2850

2851 I.setTailBit(true);

2852 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,

2854 }

2855 }

2856

2863 }

2864

2865 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;

2868 if (CallInst *CI = dyn_cast<CallInst>(&CB))

2869 CallSiteTailKind = CI->getTailCallKind();

2870

2871

2874

2875 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;

2876 ++BB) {

2878 CallInst *CI = dyn_cast<CallInst>(&I);

2879 if (!CI)

2880 continue;

2881

2882

2883

2884 if (!VarArgsToForward.empty() &&

2885 ((ForwardVarArgsTo &&

2888

2891 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {

2892 for (unsigned ArgNo = 0;

2894 ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

2895 }

2896

2897

2898 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());

2900 Attrs.getRetAttrs(), ArgAttrs);

2901

2903 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());

2911 CI = NewCI;

2912 }

2913

2915 InlinedDeoptimizeCalls |=

2916 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

2917

2918

2919

2920

2921

2922

2923

2924

2925

2926

2927

2928

2929

2930

2931

2932

2935 ChildTCK = std::min(CallSiteTailKind, ChildTCK);

2938

2939

2940

2941

2942

2945 }

2946 }

2947 }

2948

2949

2950

2951

2952

2953

2954 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&

2956 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());

2958

2960 continue;

2961

2962

2963

2965 continue;

2966

2967

2969 if (ConstantInt *AIArraySize =
2970 dyn_cast<ConstantInt>(AI->getArraySize())) {

2971 auto &DL = Caller->getDataLayout();

2973 TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);

2974 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

2975

2976

2977 if (AllocaArraySize == 0)

2978 continue;

2979

2980

2981

2983 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&

2984 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=

2987 AllocaArraySize * AllocaTypeSize);

2988 }

2989 }

2990

2993

2994

2995 if (InlinedMustTailCalls &&

2996 RI->getParent()->getTerminatingMustTailCall())

2997 continue;

2998 if (InlinedDeoptimizeCalls &&

2999 RI->getParent()->getTerminatingDeoptimizeCall())

3000 continue;

3002 }

3003 }

3004 }

3005

3006

3007

3009

3010 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())

3012

3013

3014

3016

3017

3018 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())

3019 continue;

3020 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())

3021 continue;

3023 }

3024 }

3025

3026

3027

3028

3029

3030 if (auto *II = dyn_cast<InvokeInst>(&CB)) {

3031 BasicBlock *UnwindDest = II->getUnwindDest();

3032 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
3033 if (isa<LandingPadInst>(FirstNonPHI)) {
3034 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
3035 } else {
3036 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
3037 }

3038 }

3039

3040

3041

3042

3043 if (CallSiteEHPad) {

3045 E = Caller->end();

3046 BB != E; ++BB) {

3047

3049

3050

3051

3052

3053

3054 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))

3055 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)

3057

3059 if (I->isEHPad())

3060 continue;

3061

3062 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
3063 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
3064 CatchSwitch->setParentPad(CallSiteEHPad);
3065 } else {
3066 auto *FPI = cast<FuncletPadInst>(I);
3067 if (isa<ConstantTokenNone>(FPI->getParentPad()))
3068 FPI->setParentPad(CallSiteEHPad);

3069 }

3070 }

3071 }

3072

3073 if (InlinedDeoptimizeCalls) {

3074

3075

3076

3077

3078

3079 if (Caller->getReturnType() == CB.getType()) {

3081 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;

3082 });

3083 } else {

3086 Caller->getParent(), Intrinsic::experimental_deoptimize,

3087 {Caller->getReturnType()});

3088

3090 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();

3091 if (!DeoptCall) {

3093 continue;

3094 }

3095

3096

3097

3098

3099

3100

3105

3107

3110 auto DeoptAttributes = DeoptCall->getAttributes();

3113 "Expected at least the deopt operand bundle");

3114

3117 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);

3122 else

3123 Builder.CreateRet(NewDeoptCall);

3124

3127 }

3128

3129

3130 std::swap(Returns, NormalReturns);

3131 }

3132 }

3133

3134

3135

3136

3137

3138 if (InlinedMustTailCalls) {

3139

3140 Type *NewRetTy = Caller->getReturnType();

3141 bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;

3142

3143

3146 CallInst *ReturnedMustTail =

3147 RI->getParent()->getTerminatingMustTailCall();

3148 if (!ReturnedMustTail) {

3150 continue;

3151 }

3152 if (!NeedBitCast)

3153 continue;

3154

3155

3157 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());

3159 if (OldCast)

3160 OldCast->eraseFromParent();

3161

3162

3165 }

3166

3167

3168 std::swap(Returns, NormalReturns);

3169 }

3170

3171

3172

3173

3174

3175

3176

3178

3180 make_range(FirstNewBlock->getIterator(), Caller->end()))

3182 if (auto *CB = dyn_cast(&I))

3186 }

3187

3188

3189

3190

3191 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {

3192

3193 OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),

3194 FirstNewBlock->end());

3195

3196 Caller->back().eraseFromParent();

3197

3198

3199

3200 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {

3203 }

3204

3205

3206

3209 if (&CB == R->getReturnValue())

3211 else

3213 }

3214

3216

3217

3218 Returns[0]->eraseFromParent();

3219

3220 if (MergeAttributes)

3222

3223

3225 }

3226

3227

3228

3229

3230

3231

3232

3234 BranchInst *CreatedBranchToNormalDest = nullptr;

3235 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {

3236

3237

3239

3240

3241

3242

3243 AfterCallBB =

3245 CalledFunc->getName() + ".exit");

3246

3247 } else {

3248

3249

3250

3252 CalledFunc->getName() + ".exit");

3253 }

3254

3256

3259 }

3260

3261

3262

3263

3266 "splitBasicBlock broken!");

3268

3269

3270

3271

3272 Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,

3273 Caller->end());

3274

3275

3276

3278

3280 if (Returns.size() > 1) {

3281

3282

3285 PHI->insertBefore(AfterCallBB->begin());

3286

3287

3289 }

3290

3291

3292

3293 if (PHI) {

3296 "Ret value not consistent in function!");

3297 PHI->addIncoming(RI->getReturnValue(), RI->getParent());

3298 }

3299 }

3300

3301

3308 }

3309

3310

3311

3312

3313 if (CreatedBranchToNormalDest)

3314 CreatedBranchToNormalDest->setDebugLoc(Loc);

3315 } else if (!Returns.empty()) {

3316

3317

3319 if (&CB == Returns[0]->getReturnValue())

3321 else

3323 }

3324

3325

3326 BasicBlock *ReturnBB = Returns[0]->getParent();

3328

3329

3330

3331 AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);

3332

3333 if (CreatedBranchToNormalDest)

3335

3336

3337 Returns[0]->eraseFromParent();

3340

3341

3343 }

3344

3345

3347

3348

3349

3350 if (InlinedMustTailCalls && pred_empty(AfterCallBB))

3352

3353

3354

3355 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
3356 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

3357

3358

3359

3362

3363

3365

3366

3368

3369

3370

3371

3372 if (PHI) {

3375 auto &DL = Caller->getDataLayout();

3377 PHI->replaceAllUsesWith(V);

3378 PHI->eraseFromParent();

3379 }

3380 }

3381

3382 if (MergeAttributes)

3384

3386}
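
A minimal usage sketch of the entry point implemented above. InlineFunction, InlineFunctionInfo, and InlineResult are the real APIs declared in llvm/Transforms/Utils/Cloning.h; the driver function and its inline-everything-local policy are illustrative assumptions, not part of this file.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Transforms/Utils/Cloning.h"

using namespace llvm;

// Hypothetical driver: inline every direct call whose callee has a body.
static bool inlineAllLocalCalls(Function &F) {
  // Collect call sites first; InlineFunction mutates the caller's CFG.
  SmallVector<CallBase *, 8> Sites;
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction())
          if (!Callee->isDeclaration())
            Sites.push_back(CB);

  bool Changed = false;
  for (CallBase *CB : Sites) {
    InlineFunctionInfo IFI;
    // MergeAttributes=true folds the callee's attributes into the caller.
    Changed |= InlineFunction(*CB, IFI, /*MergeAttributes=*/true).isSuccess();
  }
  return Changed;
}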


static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)

static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)

Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.

static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)

Update inlined instructions' line numbers to encode the location where these instructions are inlined.

static void removeCallsiteMetadata(CallBase *Call)

static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD)

static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)

Given an EH pad, find where it unwinds.

static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))

static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)

If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...

static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)

Helper for getUnwindDestToken that does the descendant-ward part of the search.

static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)

When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
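
The central rewrite applied to each throwing call is the standard utility from llvm/Transforms/Utils/Local.h; a minimal sketch, assuming CI is a potentially-throwing CallInst found in an inlined block and UnwindEdge is the invoke's unwind destination:

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// Replace the call with an invoke that unwinds to UnwindEdge, splitting the
// block after the call so the invoke's normal edge has somewhere to go.
static void convertToInvoke(CallInst *CI, BasicBlock *UnwindEdge) {
  changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
}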

static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)

Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
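
A sketch of the cloning step named here, mirroring the body at lines 1812-1819 above; DebugLoc::appendInlinedAt and DILocation::get are the real APIs, and the cache memoizes rewritten inlined-at chains so repeated locations are not re-created:

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"

using namespace llvm;

// Re-create OrigDL so its inlined-at chain ends at the call site's location.
static DebugLoc cloneWithInlinedAt(DebugLoc OrigDL, DILocation *InlinedAt,
                                   LLVMContext &Ctx,
                                   DenseMap<const MDNode *, MDNode *> &Cache) {
  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, Cache);
  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
                         OrigDL.getScope(), IA);
}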

static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))

static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)

When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...

static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)

static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap)

static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)

Update the branch metadata for cloned call instructions.
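
The clamping arithmetic used on the profile-update path (cf. lines 2064-2067 above), extracted as a stand-alone sketch with illustrative names:

#include <cstdint>

// New callee entry count after attributing EntryDelta calls to the inlined
// site; saturates at zero rather than wrapping when the delta exceeds it.
static uint64_t scaledEntryCount(uint64_t PriorEntryCount, int64_t EntryDelta) {
  return (EntryDelta < 0 &&
          static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
             ? 0
             : PriorEntryCount + EntryDelta;
}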

static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)

Update the block frequencies of the caller after a callee has been inlined.

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)

static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)

static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)

static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))

static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)

If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
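
The per-argument scope creation this function performs (cf. lines 1141-1161 above) uses MDBuilder's anonymous alias-scope helpers; a minimal sketch, where the wrapper function itself is an illustrative assumption:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Create one domain for the inlined call and a fresh scope per noalias
// argument name, mirroring the NewScopes map built above.
static SmallVector<MDNode *, 4> makeArgScopes(LLVMContext &Ctx,
                                              StringRef CalleeName,
                                              ArrayRef<StringRef> ArgNames) {
  MDBuilder MDB(Ctx);
  MDNode *Domain = MDB.createAnonymousAliasScopeDomain(CalleeName);
  SmallVector<MDNode *, 4> Scopes;
  for (StringRef Name : ArgNames)
    Scopes.push_back(MDB.createAnonymousAliasScope(Domain, Name));
  return Scopes;
}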

static const std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)

static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)

static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)

If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...

static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)

An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed by a call to retainRV or claimRV immediately after the call.

static Value * getParentPad(Value *EHPad)

Helper for getUnwindDestToken/getUnwindDestTokenHelper.

static void fixupAssignments(Function::iterator Start, Function::iterator End)

Update inlined instructions' DIAssignID metadata.

static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)

Return the result of AI->isStaticAlloca() if AI were moved to the entry block.

static bool isUsedByLifetimeMarker(Value *V)

static void removeMemProfMetadata(CallBase *Call)

static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)

When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by adding it.
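
A minimal sketch of that explicit copy, assuming hypothetical names ByValTy, Arg, and InsertPt (the real helper additionally handles alignment and address-space details):

#include "llvm/IR/IRBuilder.h"

// Sketch: make the byval copy explicit with an alloca plus memcpy.
llvm::Value *emitExplicitByValCopy(llvm::Type *ByValTy, llvm::Value *Arg,
                                   llvm::Instruction *InsertPt) {
  llvm::IRBuilder<> B(InsertPt);
  const llvm::DataLayout &DL = InsertPt->getDataLayout();
  // Caller-local storage standing in for the callee's byval parameter.
  llvm::AllocaInst *Copy = B.CreateAlloca(ByValTy, nullptr, "byval.copy");
  // The copy that the byval ABI made implicit, now explicit.
  B.CreateMemCpy(Copy, Copy->getAlign(), Arg, llvm::MaybeAlign(),
                 DL.getTypeAllocSize(ByValTy));
  return Copy;
}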

static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)

If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumptions to preserve this information.
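
A hedged sketch of emitting one such assumption with the IRBuilder helper listed further down; CB, PtrArg, and Alignment are assumed inputs:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"

// Sketch: materialize an @llvm.assume alignment fact before the call site.
void emitAlignAssumption(llvm::CallBase &CB, llvm::Value *PtrArg,
                         unsigned Alignment) {
  llvm::IRBuilder<> B(&CB); // insert immediately before the call being inlined
  B.CreateAlignmentAssumption(CB.getDataLayout(), PtrArg, Alignment);
}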

static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)

static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))

static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)

static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)

static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)

Bundle operands of the inlined function must be added to inlined call sites.

static bool hasLifetimeMarkers(AllocaInst *AI)

static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList)

This file defines common analysis utilities used by the ObjC ARC Optimizer.

This file defines ARC utility functions which are used by various parts of the compiler.

This file contains the declarations for profiling metadata utility functions.

This file implements a set that has insertion order iteration characteristics.

This file defines the SmallPtrSet class.

This file defines the SmallVector class.

MemoryEffects getMemoryEffects(const CallBase *Call)

Return the behavior of the given call site.

Class for arbitrary precision integers.

An instruction to allocate memory on the stack.

bool isSwiftError() const

Return true if this alloca is used as a swifterror argument to a call.

PointerType * getType() const

Overload to return most specific pointer type.

Type * getAllocatedType() const

Return the type that is being allocated by the instruction.

bool isUsedWithInAlloca() const

Return true if this alloca is used as an inalloca argument to a call.

const Value * getArraySize() const

Get the number of elements allocated.
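
Taken together, these accessors are enough to write the "would be static in the entry block" style of test used above; a small sketch:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

// Sketch: an alloca is entry-block-static only if its array size is a
// compile-time constant and it is not an inalloca argument slot.
bool wouldBeStaticInEntry(const llvm::AllocaInst *AI) {
  return llvm::isa<llvm::Constant>(AI->getArraySize()) &&
         !AI->isUsedWithInAlloca();
}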

This class represents an incoming formal argument to a Function.

unsigned getArgNo() const

Return the index of this formal argument in its containing function.

static uint64_t getGUID(const Function &F)

A cache of @llvm.assume calls within a function.

void registerAssumption(AssumeInst *CI)

Add an @llvm.assume intrinsic to this function's cache.

An instruction that atomically checks whether a specified value is in a memory location,...

an instruction that atomically reads a memory location, combines it with another value,...

AttrBuilder & addAlignmentAttr(MaybeAlign Align)

This turns an alignment into the form used internally in Attribute.

Attribute getAttribute(Attribute::AttrKind Kind) const

Return Attribute with the given Kind.

uint64_t getDereferenceableBytes() const

Retrieve the number of dereferenceable bytes, if the dereferenceable attribute exists (zero is returned otherwise).

bool hasAttributes() const

Return true if the builder has IR-level attributes.

AttrBuilder & addAttribute(Attribute::AttrKind Val)

Add an attribute to the builder.

MaybeAlign getAlignment() const

Retrieve the alignment attribute, if it exists.

AttrBuilder & addDereferenceableAttr(uint64_t Bytes)

This turns the number of dereferenceable bytes into the form used internally in Attribute.

uint64_t getDereferenceableOrNullBytes() const

Retrieve the number of dereferenceable_or_null bytes, if the dereferenceable_or_null attribute exists (zero is returned otherwise).

AttrBuilder & removeAttribute(Attribute::AttrKind Val)

Remove an attribute from the builder.

AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)

This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.

AttrBuilder & addRangeAttr(const ConstantRange &CR)

Add range attribute.

AttributeList addRetAttributes(LLVMContext &C, const AttrBuilder &B) const

Add a return value attribute to the list.
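
A sketch of the usual AttrBuilder-to-AttributeList round trip for return attributes; NewCB, DerefBytes, and RetAlign are assumed inputs:

#include "llvm/IR/Attributes.h"
#include "llvm/IR/InstrTypes.h"

// Sketch: rebuild a few return attributes and re-attach them to a call.
void addRetAttrs(llvm::CallBase &NewCB, uint64_t DerefBytes,
                 llvm::MaybeAlign RetAlign) {
  llvm::LLVMContext &Ctx = NewCB.getContext();
  llvm::AttrBuilder AB(Ctx);
  AB.addAttribute(llvm::Attribute::NonNull);
  AB.addDereferenceableAttr(DerefBytes);
  AB.addAlignmentAttr(RetAlign);
  NewCB.setAttributes(NewCB.getAttributes().addRetAttributes(Ctx, AB));
}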

static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)

Create an AttributeList with the specified parameters in it.

AttributeSet getParamAttrs(unsigned ArgNo) const

The attributes for the argument or parameter at the given index are returned.

AttributeSet removeAttribute(LLVMContext &C, Attribute::AttrKind Kind) const

Remove the specified attribute from this set.

static AttributeSet get(LLVMContext &C, const AttrBuilder &B)

const ConstantRange & getRange() const

Returns the value of the range attribute.

AttrKind

This enumeration lists the attributes that can be associated with parameters, function results, or the function itself.

bool isValid() const

Return true if the attribute is any kind of attribute.

LLVM Basic Block Representation.

iterator begin()

Instruction iterator methods.

iterator_range< const_phi_iterator > phis() const

Returns a range that iterates over the phis in the basic block.

const Instruction * getFirstNonPHI() const

Returns a pointer to the first instruction in this block that is not a PHINode instruction.

BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)

Split the basic block into two basic blocks at the specified instruction.
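
For example (a sketch with assumed names), splitting a block at a call moves the call and everything after it into a fresh successor block, leaving an unconditional branch behind:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

// Sketch: split at CI; CI and all later instructions land in the new block.
llvm::BasicBlock *splitAtCall(llvm::CallInst *CI) {
  llvm::BasicBlock *BB = CI->getParent();
  return BB->splitBasicBlock(CI->getIterator(), BB->getName() + ".split");
}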

const Function * getParent() const

Return the enclosing method, or null if none.

SymbolTableList< BasicBlock >::iterator eraseFromParent()

Unlink 'this' from the containing function and delete it.

InstListType::iterator iterator

Instruction iterators...

const Instruction * getTerminator() const LLVM_READONLY

Returns the terminator instruction if the block is well formed or null if the block is not well formed.

void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)

Transfer all instructions from FromBB to this basic block at ToIt.

void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)

Update PHI nodes in this BasicBlock before removal of predecessor Pred.

BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequencies.

void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)

void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)

Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale such that their frequencies relative to ReferenceBB remain unchanged.

BlockFrequency getBlockFreq(const BasicBlock *BB) const

getBlockFreq - Return block frequency.

Conditional or Unconditional Branch instruction.

static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)

Base class for all callable instructions (InvokeInst and CallInst); holds everything related to calling a function.

void setCallingConv(CallingConv::ID CC)

MaybeAlign getRetAlign() const

Extract the alignment of the return value.

void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const

Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.

OperandBundleUse getOperandBundleAt(unsigned Index) const

Return the operand bundle at a specific index.

std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const

Return an operand bundle by name, if present.

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.

void removeRetAttrs(const AttributeMask &AttrsToRemove)

Removes the attributes from the return value.

bool hasRetAttr(Attribute::AttrKind Kind) const

Determine whether the return value has the given attribute.

unsigned getNumOperandBundles() const

Return the number of operand bundles associated with this User.

CallingConv::ID getCallingConv() const

bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Get the attribute of a given kind from a given arg.

bool isByValArgument(unsigned ArgNo) const

Determine whether this argument is passed by value.

static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)

Create a clone of CB with operand bundle OB added.
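
A sketch of the bundle-propagation pattern built on these APIs (close in shape to what PropagateOperandBundles in this file does; names here are illustrative):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/InstrTypes.h"

// Sketch: clone CB with its existing bundles plus a "funclet" bundle,
// then replace the original call with the clone.
void addFuncletBundle(llvm::CallBase *CB, llvm::Instruction *CallSiteEHPad) {
  llvm::SmallVector<llvm::OperandBundleDef, 2> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.emplace_back("funclet", CallSiteEHPad);
  llvm::CallBase *NewCB =
      llvm::CallBase::Create(CB, Bundles, CB->getIterator());
  NewCB->takeName(CB);
  CB->replaceAllUsesWith(NewCB);
  CB->eraseFromParent();
}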

AttributeSet getRetAttributes() const

Return the return attributes for this call.

Type * getParamByValType(unsigned ArgNo) const

Extract the byval type for a call or parameter.

Value * getCalledOperand() const

void setAttributes(AttributeList A)

Set the attributes for this call.

std::optional< ConstantRange > getRange() const

If this return value has a range attribute, return the value range of the argument.

bool doesNotThrow() const

Determine if the call cannot unwind.

Value * getArgOperand(unsigned i) const

uint64_t getRetDereferenceableBytes() const

Extract the number of dereferenceable bytes for a call or parameter (0=unknown).

bool isConvergent() const

Determine if the invoke is convergent.

FunctionType * getFunctionType() const

static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)

Create a clone of CB with a different set of operand bundles and insert it before InsertPt.

uint64_t getRetDereferenceableOrNullBytes() const

Extract the number of dereferenceable_or_null bytes for a call (0=unknown).

iterator_range< User::op_iterator > args()

Iteration adapter for range-for loops.

unsigned arg_size() const

AttributeList getAttributes() const

Return the attributes for this call.

bool hasOperandBundles() const

Return true if this User has any operand bundles.

Function * getCaller()

Helper to get the caller (the parent function).

This class represents a function call, abstracting a target machine's calling convention.

void setTailCallKind(TailCallKind TCK)

TailCallKind getTailCallKind() const

static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

bool isMustTailCall() const

static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)

This is the shared class of boolean and integer constants.

This class represents a range of values.

ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const

Return the range that results from the intersection of this range with another range.

static ConstantTokenNone * get(LLVMContext &Context)

Return the ConstantTokenNone.

This is an important base class in LLVM.

const Constant * stripPointerCasts() const

static InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)

Get the instruction instrumenting a BB, or nullptr if not present.

static InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)

Get the instruction instrumenting a callsite, or nullptr if that cannot be found.

This class represents an Operation in the Expression.

A parsed version of the target data layout string, and methods for querying it.

Base class for non-instruction debug metadata records that have positions within IR.

DILocation * get() const

Get the underlying DILocation.

MDNode * getScope() const

static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)

Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-at the new call site.

iterator find(const_arg_type_t< KeyT > Val)

size_type count(const_arg_type_t< KeyT > Val) const

Return 1 if the specified key is in the map, 0 otherwise.

std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)

Implements a dense probed hash-table based set.

void recalculate(ParentType &Func)

recalculate - compute a dominator tree for the given function

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

unsigned getNumParams() const

Return the number of fixed parameters this function type requires.

Class to represent profile counts.

uint64_t getCount() const

const BasicBlock & getEntryBlock() const

BasicBlockListType::iterator iterator

FunctionType * getFunctionType() const

Returns the FunctionType for me.

const BasicBlock & front() const

iterator_range< arg_iterator > args()

DISubprogram * getSubprogram() const

Get the attached subprogram.

bool hasGC() const

hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generation.

CallingConv::ID getCallingConv() const

getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.

bool hasPersonalityFn() const

Check whether this function has a personality function.

Constant * getPersonalityFn() const

Get the personality function associated with this function.

bool isIntrinsic() const

isIntrinsic - Returns true if the function's name starts with "llvm.".

MaybeAlign getParamAlign(unsigned ArgNo) const

LLVMContext & getContext() const

getContext - Return a reference to the LLVMContext associated with this function.

const std::string & getGC() const

std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const

Get the entry count for this function.

Type * getReturnType() const

Returns the type of the ret val.

void setCallingConv(CallingConv::ID CC)

bool onlyReadsMemory() const

Determine if the function does not access or only reads memory.

bool hasFnAttribute(Attribute::AttrKind Kind) const

Return true if the function has the attribute.

bool isDeclaration() const

Return true if the primary definition of this global value is outside of the current translation unit.

CallInst * CreateStackSave(const Twine &Name="")

Create a call to llvm.stacksave.

CallInst * CreateLifetimeStart(Value *Ptr, ConstantInt *Size=nullptr)

Create a lifetime.start intrinsic.

CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)

Create an assume intrinsic call that represents an alignment assumption on the provided pointer.

ReturnInst * CreateRet(Value *V)

Create a 'ret <val>' instruction.

ConstantInt * getInt64(uint64_t C)

Get a constant 64-bit value.

CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")

Create a call to intrinsic ID with Args, mangled using Types.

Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")

ReturnInst * CreateRetVoid()

Create a 'ret void' instruction.

CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)

CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)

Create a lifetime.end intrinsic.

CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")

Create a call to llvm.stackrestore.
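
These two intrinsics bracket inlined bodies that contain dynamic allocas; a sketch of that bracketing, assuming FirstNewBlock is the first inlined block and Returns collects the cloned returns:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/IRBuilder.h"

// Sketch: save the stack pointer on entry to the inlined code and
// restore it on every return path out of it.
void bracketDynamicAllocas(llvm::BasicBlock *FirstNewBlock,
                           llvm::ArrayRef<llvm::ReturnInst *> Returns) {
  llvm::IRBuilder<> B(&FirstNewBlock->front());
  llvm::CallInst *SavedPtr = B.CreateStackSave("savedstack");
  for (llvm::ReturnInst *RI : Returns) {
    llvm::IRBuilder<> RB(RI);
    RB.CreateStackRestore(SavedPtr);
  }
}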

void SetInsertPoint(BasicBlock *TheBB)

This specifies that created instructions should be appended to the end of the specified block.

CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)

Create and insert a memcpy between the specified pointers.

Instruction * CreateNoAliasScopeDeclaration(Value *Scope)

Create a llvm.experimental.noalias.scope.decl intrinsic call.

This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.

This class captures the data input to the InlineFunction call, and records the auxiliary results produced by it.

bool UpdateProfile

Update profile for callee as well as cloned version.

function_ref< AssumptionCache &(Function &)> GetAssumptionCache

If non-null, InlineFunction will update the callgraph to reflect the changes it makes.

BlockFrequencyInfo * CalleeBFI

SmallVector< AllocaInst *, 4 > StaticAllocas

InlineFunction fills this in with all static allocas that get copied into the caller.

BlockFrequencyInfo * CallerBFI

SmallVector< CallBase *, 8 > InlinedCallSites

All of the new call sites inlined into the caller.

InlineResult is basically true or false.

static InlineResult success()

static InlineResult failure(const char *Reason)

This represents the llvm.instrprof.callsite intrinsic.

This represents the llvm.instrprof.increment intrinsic.

void insertBefore(Instruction *InsertPos)

Insert an unlinked instruction into a basic block immediately before the specified instruction.

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

bool hasMetadata() const

Return true if this instruction has any metadata attached to it.

bool isEHPad() const

Return true if the instruction is a variety of EH-block.

InstListType::iterator eraseFromParent()

This method unlinks 'this' from the containing basic block and deletes it.

const Function * getFunction() const

Return the function this instruction belongs to.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

void setMetadata(unsigned KindID, MDNode *Node)

Set the metadata of the specified kind to the specified node.

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

void setDebugLoc(DebugLoc Loc)

Set the debug location information for this instruction.

const DataLayout & getDataLayout() const

Get the data layout of the module this instruction belongs to.

A wrapper class for inspecting calls to intrinsic functions.

static bool mayLowerToFunctionCall(Intrinsic::ID IID)

Check if the intrinsic might lower into a regular function call in the course of IR transformations.

This is an important class for using LLVM in a threaded context.

OB_clang_arc_attachedcall

The landingpad instruction holds all of the information necessary to generate correct exception handling.

bool isCleanup() const

Return 'true' if this landingpad instruction is a cleanup.

unsigned getNumClauses() const

Get the number of clauses for this landing pad.

Constant * getClause(unsigned Idx) const

Get the value of the clause at index Idx.

An instruction for reading from memory.

MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())

Return metadata appropriate for an alias scope root node.

MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())

Return metadata appropriate for an alias scope domain node.

void replaceAllUsesWith(Metadata *MD)

RAUW a temporary.

static MDNode * concatenate(MDNode *A, MDNode *B)

Methods for metadata merging.
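
A sketch of how these pieces combine when tagging an instruction with a fresh alias scope (the shape of what AddAliasScopeMetadata sets up; Name is an assumed label):

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"

// Sketch: create a domain/scope pair and merge the scope into any
// !alias.scope list already attached to I.
void tagWithNewScope(llvm::Instruction &I, llvm::StringRef Name) {
  llvm::MDBuilder MDB(I.getContext());
  llvm::MDNode *Domain = MDB.createAnonymousAliasScopeDomain(Name);
  llvm::MDNode *Scope = MDB.createAnonymousAliasScope(Domain, Name);
  llvm::MDNode *List = llvm::MDNode::get(I.getContext(), {Scope});
  I.setMetadata(llvm::LLVMContext::MD_alias_scope,
                llvm::MDNode::concatenate(
                    I.getMetadata(llvm::LLVMContext::MD_alias_scope), List));
}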

ArrayRef< MDOperand > operands() const

op_iterator op_end() const

static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)

unsigned getNumOperands() const

Return number of MDNode operands.

op_iterator op_begin() const

LLVMContext & getContext() const

static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)

Return a temporary node.

bool onlyAccessesInaccessibleMem() const

Whether this function only (at most) accesses inaccessible memory.

bool onlyAccessesArgPointees() const

Whether this function only (at most) accesses argument memory.

A Module instance is used to store all the information related to an LLVM module.

A container for an operand bundle being viewed as a set of values rather than a set of uses.

The instrumented contextual profile, produced by the CtxProfAnalysis.

void update(Visitor, const Function &F)

uint32_t getNumCounters(const Function &F) const

uint32_t allocateNextCounterIndex(const Function &F)

uint32_t getNumCallsites(const Function &F) const

uint32_t allocateNextCallsiteIndex(const Function &F)

A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.

void addIncoming(Value *V, BasicBlock *BB)

Add an incoming value to the end of the PHI list.

static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
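
A minimal usage sketch (assumed names), mirroring how the inliner builds merge PHIs with a small reservation hint:

#include "llvm/IR/Instructions.h"

// Sketch: create a two-input PHI at the top of Dest and wire it up.
llvm::PHINode *mergeTwo(llvm::BasicBlock *Dest, llvm::Value *A,
                        llvm::BasicBlock *FromA, llvm::Value *B,
                        llvm::BasicBlock *FromB) {
  llvm::PHINode *PN =
      llvm::PHINode::Create(A->getType(), /*NumReservedValues=*/2, "merge",
                            Dest->begin());
  PN->addIncoming(A, FromA);
  PN->addIncoming(B, FromB);
  return PN;
}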

static PoisonValue * get(Type *T)

Static factory methods - Return an 'poison' object of the specified type.

Analysis providing profile information.

std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const

Returns the profile count for CallInst.

Resume the propagation of an exception.

Return a value (possibly void), from a function.

A vector that has set insertion semantics.

A templated base class for SmallPtrSet which provides the typesafe interface that is common across all small sizes.

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

bool contains(ConstPtrType Ptr) const

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.

reference emplace_back(ArgTypes &&... Args)

void reserve(size_type N)

void append(ItTy in_start, ItTy in_end)

Add the specified range to the end of the SmallVector.

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

The instances of the Type class are immutable: once they are created, they are never changed.

unsigned getPointerAddressSpace() const

Get the address space of this pointer or pointer vector type.

LLVMContext & getContext() const

Return the LLVMContext in which this type was uniqued.

static IntegerType * getInt64Ty(LLVMContext &C)

bool isVoidTy() const

Return true if this is 'void'.

void setOperand(unsigned i, Value *Val)

Value * getOperand(unsigned i) const

This class represents the va_arg llvm instruction, which returns an argument of the specified type given a va_list and increments it.

ValueT lookup(const KeyT &Val) const

lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.

size_type count(const KeyT &Val) const

Return 1 if the specified key is in the map, 0 otherwise.

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

void replaceAllUsesWith(Value *V)

Change all uses of this to point to a new Value.

iterator_range< user_iterator > users()

LLVMContext & getContext() const

All values hold a context through their type.

StringRef getName() const

Return a constant reference to the value's name.

void takeName(Value *V)

Transfer the name from V to this value.

std::pair< iterator, bool > insert(const ValueT &V)

constexpr ScalarTy getFixedValue() const

constexpr bool isScalable() const

Returns whether the quantity is scaled by a runtime quantity (vscale).

const ParentTy * getParent() const

self_iterator getIterator()

Class to build a trie of call stack contexts for a particular profiled allocation call, along with their associated allocation types.

Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresponding ThinLTO summary data structures (CallsiteInfo and MIBInfo).

This provides a very simple, boring adaptor for a begin and end iterator into a range type.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)

Which attributes cannot be applied to a type.

void mergeAttributesForInlining(Function &Caller, const Function &Callee)

Merge caller's and callee's attributes.

Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})

Look up the Function declaration of the intrinsic id in the Module M.

bool match(Val *V, const Pattern &P)

match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()

Match an arbitrary immediate Constant and ignore it.

AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)

Return a range of dbg.assign intrinsics which use ID as an operand.

void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)

Track assignments to Vars between Start and End.

void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)

Replace DIAssignID uses and attachments with IDs from Map.

SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)

initializer< Ty > init(const Ty &Val)

MDNode * getMIBStackNode(const MDNode *MIB)

Returns the stack node from an MIB metadata node.

ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)

This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedcall.

ARCInstKind

Equivalence classes of instructions in the ARC Model.

std::optional< Function * > getAttachedARCFunction(const CallBase *CB)

This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the ARC runtime function.

bool isRetainOrClaimRV(ARCInstKind Kind)

Check whether the function is retainRV/unsafeClaimRV.

const Value * GetRCIdentityRoot(const Value *V)

The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equivalent to retaining or releasing V.

bool hasAttachedCallOpBundle(const CallBase *CB)

UnaryFunction for_each(R &&Range, UnaryFunction F)

Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)

Get the size of a range.

BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)

Convert the CallInst to InvokeInst with the specified unwind edge basic block.
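
Usage is a single call (a sketch; UnwindEdge is assumed to already hold the caller's landing pad or EH pad block):

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

// Sketch: rewrite CI into an invoke unwinding to UnwindEdge; the utility
// splits CI's block and returns the new normal-destination block.
llvm::BasicBlock *makeInvoke(llvm::CallInst *CI, llvm::BasicBlock *UnwindEdge) {
  return llvm::changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
}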

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)

PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing function (which is required to exist).

void append_range(Container &C, Range &&R)

Wrapper function to append range R to container C.

const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)

This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.

iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)

Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.

bool isScopedEHPersonality(EHPersonality Pers)

Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.

Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)

See if we can compute a simplified version of this instruction.

Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to infer an alignment for the specified pointer.

Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to ensure that the alignment of V is at least PrefAlign bytes.

void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)

This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on the fly.

EHPersonality classifyEHPersonality(const Value *Pers)

See if the given exception handling personality function is one that we understand.

unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)

Insert an unreachable instruction before the specified instruction, making it and the rest of the code in the block dead.

raw_fd_ostream & errs()

This returns a reference to a raw_ostream for standard error.

bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)

Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.

void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)

Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite information by the new count divided by the old count.

bool isAssignmentTrackingEnabled(const Module &M)

Return true if assignment tracking is enabled for module M.

MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)

Compute the union of two access-group lists.

InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)

This function inlines the called function into the basic block of the caller.
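
The canonical calling pattern for this entry point looks roughly like the following sketch (real passes also wire up AssumptionCache/BFI/PSI through InlineFunctionInfo and act on the failure reason):

#include "llvm/IR/InstrTypes.h"
#include "llvm/Transforms/Utils/Cloning.h"

// Sketch: attempt to inline one call site and report success.
bool tryInline(llvm::CallBase &CB) {
  llvm::InlineFunctionInfo IFI;
  llvm::InlineResult Res =
      llvm::InlineFunction(CB, IFI, /*MergeAttributes=*/true);
  return Res.isSuccess();
}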

bool isAsynchronousEHPersonality(EHPersonality Pers)

Returns true if this personality function catches asynchronous exceptions.

bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)

Return true if this function can prove that the instruction I will always transfer execution to one of its successors (including the next instruction that follows within a basic block).

bool isEscapeSource(const Value *V)

Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalObject.

auto count_if(R &&Range, UnaryPredicate P)

Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.

void erase_if(Container &C, UnaryPredicate P)

Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C, pred), C.end());

void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)

This method is similar to getUnderlyingObject except that it can look through phi and select instructions and return multiple objects.

bool pred_empty(const BasicBlock *BB)

void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)

Update the debug locations contained within the MD_loop metadata attached to the instruction I, if one exists.

bool isIdentifiedObject(const Value *V)

Return true if this pointer refers to a distinct and identifiable object.

void scaleProfData(Instruction &I, uint64_t S, uint64_t T)

Scaling the profile data attached to 'I' using the ratio of S/T.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This struct is a compact representation of a valid (non-zero power of two) alignment.

This struct can be used to capture information about code being cloned, while it is being cloned.

bool ContainsDynamicAllocas

This is set to true if the cloned code contains a 'dynamic' alloca.

bool isSimplified(const Value *From, const Value *To) const

bool ContainsCalls

This is set to true if the cloned code contains a normal call instruction.

bool ContainsMemProfMetadata

This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned code.

std::vector< WeakTrackingVH > OperandBundleCallSites

All cloned call sites that have operand bundles attached are appended to this vector.

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

Align valueOrOne() const

For convenience, returns a valid alignment or 1 if undefined.

static Instruction * tryGetVTableInstruction(CallBase *CB)

Helper struct for trackAssignments, below.