LLVM: lib/Transforms/Utils/InlineFunction.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

75#include

76#include

77#include

78#include

79#include

80#include

81#include

82#include

83#include

84

85#define DEBUG_TYPE "inline-function"

86

87using namespace llvm;

90

94 cl::desc("Convert noalias attributes to metadata during inlining."));

95

99 cl::desc("Use the llvm.experimental.noalias.scope.decl "

100 "intrinsic during inlining."));

101

102

103

104

108 cl::desc("Convert align attributes to assumptions during inlining."));

109

111 "max-inst-checked-for-throw-during-inlining", cl::Hidden,

112 cl::desc("the maximum number of instructions analyzed for may throw during "

113 "attribute inference in inlined body"),

115

116namespace {

117

118

119 class LandingPadInliningInfo {

120

122

123

124 BasicBlock *InnerResumeDest = nullptr;

125

126

128

129

130 PHINode *InnerEHValuesPHI = nullptr;

131

133

134 public:

136 : OuterResumeDest(II->getUnwindDest()) {

137

138

139

143

145 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));

146 }

147

149 }

150

151

152

153 BasicBlock *getOuterResumeDest() const {

154 return OuterResumeDest;

155 }

156

158

159 LandingPadInst *getLandingPadInst() const { return CallerLPad; }

160

161

162

163

164

165

166 void forwardResume(ResumeInst *RI,

167 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

168

169

170

171 void addIncomingPHIValuesFor(BasicBlock *BB) const {

172 addIncomingPHIValuesForInto(BB, OuterResumeDest);

173 }

174

175 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {

177 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {

179 phi->addIncoming(UnwindDestPHIValues[i], src);

180 }

181 }

182 };

183}

184

187 while (It != BB.end()) {

189 if (IntrinsicCall->isEntry()) {

190 return IntrinsicCall;

191 }

192 }

193 It = std::next(It);

194 }

195 return nullptr;

196}

197

198

199BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {

200 if (InnerResumeDest) return InnerResumeDest;

201

202

204 InnerResumeDest =

206 OuterResumeDest->getName() + ".body");

207

208

209 const unsigned PHICapacity = 2;

210

211

214 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {

217 OuterPHI->getName() + ".lpad-body");

220 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);

221 }

222

223

224 InnerEHValuesPHI =

228 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

229

230

231 return InnerResumeDest;

232}

233

234

235

236

237

238void LandingPadInliningInfo::forwardResume(

239 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {

240 BasicBlock *Dest = getInnerResumeDest();

242

245

246

247

248 addIncomingPHIValuesForInto(Src, Dest);

249

252}

253

254

257 return FPI->getParentPad();

259}

260

262

263

264

268

269 while (!Worklist.empty()) {

271

272

273

274

276 Value *UnwindDestToken = nullptr;

278 if (CatchSwitch->hasUnwindDest()) {

279 UnwindDestToken = &*CatchSwitch->getUnwindDest()->getFirstNonPHIIt();

280 } else {

281

282

283

284

285

286

287 for (auto HI = CatchSwitch->handler_begin(),

288 HE = CatchSwitch->handler_end();

289 HI != HE && !UnwindDestToken; ++HI) {

291 auto *CatchPad =

293 for (User *Child : CatchPad->users()) {

294

295

296

297

299 continue;

300

302 auto Memo = MemoMap.find(ChildPad);

303 if (Memo == MemoMap.end()) {

304

306 continue;

307 }

308

309

310 Value *ChildUnwindDestToken = Memo->second;

311 if (!ChildUnwindDestToken)

312 continue;

313

314

315

316

318 UnwindDestToken = ChildUnwindDestToken;

319 break;

320 }

322 }

323 }

324 }

325 } else {

327 for (User *U : CleanupPad->users()) {

329 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())

330 UnwindDestToken = &*RetUnwindDest->getFirstNonPHIIt();

331 else

333 break;

334 }

335 Value *ChildUnwindDestToken;

337 ChildUnwindDestToken = &*Invoke->getUnwindDest()->getFirstNonPHIIt();

340 auto Memo = MemoMap.find(ChildPad);

341 if (Memo == MemoMap.end()) {

342

344 continue;

345 }

346

347

348 ChildUnwindDestToken = Memo->second;

349 if (!ChildUnwindDestToken)

350 continue;

351 } else {

352

353 continue;

354 }

355

356

357

359 getParentPad(ChildUnwindDestToken) == CleanupPad)

360 continue;

361 UnwindDestToken = ChildUnwindDestToken;

362 break;

363 }

364 }

365

366

367 if (!UnwindDestToken)

368 continue;

369

370

371

372

373

374 Value *UnwindParent;

377 else

378 UnwindParent = nullptr;

379 bool ExitedOriginalPad = false;

380 for (Instruction *ExitedPad = CurrentPad;

381 ExitedPad && ExitedPad != UnwindParent;

383

385 continue;

386 MemoMap[ExitedPad] = UnwindDestToken;

387 ExitedOriginalPad |= (ExitedPad == EHPad);

388 }

389

390 if (ExitedOriginalPad)

391 return UnwindDestToken;

392

393

394 }

395

396

397 return nullptr;

398}

399

400

401

402

403

404

405

406

407

408

409

410

411

412

413

414

415

416

419

420

421

423 EHPad = CPI->getCatchSwitch();

424

425

426 auto Memo = MemoMap.find(EHPad);

427 if (Memo != MemoMap.end())

428 return Memo->second;

429

430

432 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));

433 if (UnwindDestToken)

434 return UnwindDestToken;

435

436

437

438

439

440

441 MemoMap[EHPad] = nullptr;

442#ifndef NDEBUG

444 TempMemos.insert(EHPad);

445#endif

447 Value *AncestorToken;

450 AncestorToken = getParentPad(AncestorToken)) {

451

453 continue;

454

455

456

457

458

459

460

461 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);

462 auto AncestorMemo = MemoMap.find(AncestorPad);

463 if (AncestorMemo == MemoMap.end()) {

465 } else {

466 UnwindDestToken = AncestorMemo->second;

467 }

468 if (UnwindDestToken)

469 break;

470 LastUselessPad = AncestorPad;

471 MemoMap[LastUselessPad] = nullptr;

472#ifndef NDEBUG

473 TempMemos.insert(LastUselessPad);

474#endif

475 }

476

477

478

479

480

481

482

483

484

485

486

487

488

489

491 while (!Worklist.empty()) {

493 auto Memo = MemoMap.find(UselessPad);

494 if (Memo != MemoMap.end() && Memo->second) {

495

496

497

498

499

500

501

503 continue;

504 }

505

506

507

508

509

510

511

512

513

514 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));

515

516

517

518

519

520

521

522 MemoMap[UselessPad] = UnwindDestToken;

524 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");

525 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {

526 auto *CatchPad = &*HandlerBlock->getFirstNonPHIIt();

527 for (User *U : CatchPad->users()) {

530 ->getUnwindDest()

531 ->getFirstNonPHIIt()) == CatchPad)) &&

532 "Expected useless pad");

535 }

536 }

537 } else {

539 for (User *U : UselessPad->users()) {

544 &*cast(U)->getUnwindDest()->getFirstNonPHIIt()) ==

545 UselessPad)) &&

546 "Expected useless pad");

549 }

550 }

551 }

552

553 return UnwindDestToken;

554}

555

556

557

558

559

560

565

566

568

570 continue;

571

572

573

574

575

576

577

579 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||

580 F->getIntrinsicID() == Intrinsic::experimental_guard)

581 continue;

582

584

585

586

587

588

589

590

592 Value *UnwindDestToken =

595 continue;

596#ifndef NDEBUG

599 MemoKey = CatchPad->getCatchSwitch();

600 else

601 MemoKey = FuncletPad;

602 assert(FuncletUnwindMap->count(MemoKey) &&

603 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&

604 "must get memoized to avoid confusing later searches");

605#endif

606 }

607

609 return BB;

610 }

611 return nullptr;

612}

613

614

615

616

617

618

619

622 BasicBlock *InvokeDest = II->getUnwindDest();

623

625

626

627

628

629 LandingPadInliningInfo Invoke(II);

630

631

634 I != E; ++I)

636 InlinedLPads.insert(II->getLandingPadInst());

637

638

639

640 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();

643 InlinedLPad->reserveClauses(OuterNum);

644 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)

645 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));

647 InlinedLPad->setCleanup(true);

648 }

649

651 BB != E; ++BB) {

654 &*BB, Invoke.getOuterResumeDest()))

655

656

657 Invoke.addIncomingPHIValuesFor(NewBB);

658

659

661 Invoke.forwardResume(RI, InlinedLPads);

662 }

663

664

665

666

667

669}

670

671

672

673

674

675

676

679 BasicBlock *UnwindDest = II->getUnwindDest();

681

683

684

685

686

690

691 UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));

692 }

693

694

695

698 for (Value *V : UnwindDestPHIValues) {

700 PHI->addIncoming(V, Src);

701 ++I;

702 }

703 };

704

705

706

709 BB != E; ++BB) {

711 if (CRI->unwindsToCaller()) {

712 auto *CleanupPad = CRI->getCleanupPad();

714 CRI->eraseFromParent();

716

717

718

719

720 assert(!FuncletUnwindMap.count(CleanupPad) ||

722 FuncletUnwindMap[CleanupPad] =

724 }

725 }

726

728 if (I->isEHPad())

729 continue;

730

733 if (CatchSwitch->unwindsToCaller()) {

734 Value *UnwindDestToken;

735 if (auto *ParentPad =

737

738

739

740

741

742

743

744

747 continue;

748 } else {

749

750

751

752

753

754

755

757 }

759 CatchSwitch->getParentPad(), UnwindDest,

760 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),

761 CatchSwitch->getIterator());

762 for (BasicBlock *PadBB : CatchSwitch->handlers())

763 NewCatchSwitch->addHandler(PadBB);

764

765

766

767

768 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;

769 Replacement = NewCatchSwitch;

770 }

773 }

774

775 if (Replacement) {

777 I->replaceAllUsesWith(Replacement);

778 I->eraseFromParent();

780 }

781 }

782

785 E = Caller->end();

786 BB != E; ++BB)

788 &*BB, UnwindDest, &FuncletUnwindMap))

789

790

792

793

794

795

796

798}

799

801 MDNode *CallsiteStackContext) {

804

805

806

807 for (auto MIBStackIter = MIBStackContext->op_begin(),

808 CallsiteStackIter = CallsiteStackContext->op_begin();

809 MIBStackIter != MIBStackContext->op_end() &&

810 CallsiteStackIter != CallsiteStackContext->op_end();

811 MIBStackIter++, CallsiteStackIter++) {

814 assert(Val1 && Val2);

815 if (Val1->getZExtValue() != Val2->getZExtValue())

816 return false;

817 }

818 return true;

819}

820

822 Call->setMetadata(LLVMContext::MD_memprof, nullptr);

823}

824

826 Call->setMetadata(LLVMContext::MD_callsite, nullptr);

827}

828

830 const std::vector<Metadata *> &MIBList,

832 assert(!MIBList.empty());

833

834

837 for (Metadata *MIB : MIBList)

839 bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);

840 assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));

841 if (!MemprofMDAttached)

842

844}

845

846

847

848

851 MDNode *InlinedCallsiteMD,

853 MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);

854 MDNode *ClonedCallsiteMD = nullptr;

855

856

857 if (OrigCallsiteMD) {

858

859

860

861 ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);

862 ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);

863 }

864

865

866 MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);

867 if (!OrigMemProfMD)

868 return;

869

870

871 assert(OrigCallsiteMD);

872

873

874 std::vector<Metadata *> NewMIBList;

875

876

877

878

879 for (auto &MIBOp : OrigMemProfMD->operands()) {

881

884

886

887 NewMIBList.push_back(MIB);

888 }

889 if (NewMIBList.empty()) {

892 return;

893 }

894 if (NewMIBList.size() < OrigMemProfMD->getNumOperands())

896}

897

898

899

900

901

902

903static void

905 bool ContainsMemProfMetadata,

909

910

911 if (!CallsiteMD && !ContainsMemProfMetadata)

912 return;

913

914

915 for (const auto &Entry : VMap) {

916

917

920 if (!OrigCall || !ClonedCall)

921 continue;

922

923

924

925 if (!CallsiteMD) {

928 continue;

929 }

931 }

932}

933

934

935

936

939 MDNode *MemParallelLoopAccess =

940 CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);

941 MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);

942 MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);

944 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)

945 return;

946

949

950 if (I.mayReadOrWriteMemory())

951 continue;

952

953 if (MemParallelLoopAccess) {

954

956 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),

957 MemParallelLoopAccess);

958 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,

959 MemParallelLoopAccess);

960 }

961

962 if (AccessGroup)

964 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

965

966 if (AliasScope)

968 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

969

970 if (NoAlias)

972 I.getMetadata(LLVMContext::MD_noalias), NoAlias));

973 }

974 }

975}

976

977

982 if (I)

983 continue;

984

986 continue;

987

988

989 auto *CalledFn =

991 if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&

993 continue;

994

996 I->getOperandBundlesAsDefs(OpBundles);

997 OpBundles.emplace_back("funclet", CallSiteEHPad);

998

1001 I->replaceAllUsesWith(NewInst);

1002 I->eraseFromParent();

1003 }

1004}

1005

1006namespace {

1007

1008

1009

1010

1011class ScopedAliasMetadataDeepCloner {

1012 using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;

1013 SetVector<const MDNode *> MD;

1014 MetadataMap MDMap;

1015 void addRecursiveMetadataUses();

1016

1017public:

1018 ScopedAliasMetadataDeepCloner(const Function *F);

1019

1020

1021

1022 void clone();

1023

1024

1025

1027};

1028}

1029

1030ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(

1031 const Function *F) {

1032 for (const BasicBlock &BB : *F) {

1033 for (const Instruction &I : BB) {

1034 if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))

1035 MD.insert(M);

1036 if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))

1037 MD.insert(M);

1038

1039

1041 MD.insert(Decl->getScopeList());

1042 }

1043 }

1044 addRecursiveMetadataUses();

1045}

1046

1047void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {

1049 while (Queue.empty()) {

1051 for (const Metadata *Op : M->operands())

1053 if (MD.insert(OpMD))

1054 Queue.push_back(OpMD);

1055 }

1056}

1057

1058void ScopedAliasMetadataDeepCloner::clone() {

1059 assert(MDMap.empty() && "clone() already called ?");

1060

1062 for (const MDNode *I : MD) {

1064 MDMap[I].reset(DummyNodes.back().get());

1065 }

1066

1067

1068

1069

1071 for (const MDNode *I : MD) {

1072 for (const Metadata *Op : I->operands()) {

1075 else

1077 }

1078

1079 MDNode *NewM = MDNode::get(I->getContext(), NewOps);

1082

1085 }

1086}

1087

1090 if (MDMap.empty())

1091 return;

1092

1093 for (BasicBlock &BB : make_range(FStart, FEnd)) {

1094 for (Instruction &I : BB) {

1095

1096

1097 if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))

1098 if (MDNode *MNew = MDMap.lookup(M))

1099 I.setMetadata(LLVMContext::MD_alias_scope, MNew);

1100

1101 if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))

1102 if (MDNode *MNew = MDMap.lookup(M))

1103 I.setMetadata(LLVMContext::MD_noalias, MNew);

1104

1106 if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))

1107 Decl->setScopeList(MNew);

1108 }

1109 }

1110}

1111

1112

1113

1114

1115

1120 return;

1121

1124

1125 for (const Argument &Arg : CalledFunc->args())

1126 if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())

1128

1129 if (NoAliasArgs.empty())

1130 return;

1131

1132

1133

1136

1137

1138

1139

1140

1141

1142

1145

1146

1149 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {

1150 const Argument *A = NoAliasArgs[i];

1151

1152 std::string Name = std::string(CalledFunc->getName());

1153 if (A->hasName()) {

1154 Name += ": %";

1155 Name += A->getName();

1156 } else {

1157 Name += ": argument ";

1159 }

1160

1161

1162

1163

1165 NewScopes.insert(std::make_pair(A, NewScope));

1166

1168

1169

1171 auto *NoAliasDecl =

1172 IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);

1173

1174

1175 (void)NoAliasDecl;

1176 }

1177 }

1178

1179

1180

1182 VMI != VMIE; ++VMI) {

1184 if (!VMI->second)

1185 continue;

1186

1188 if (!NI || InlinedFunctionInfo.isSimplified(I, NI))

1189 continue;

1190

1191 bool IsArgMemOnlyCall = false, IsFuncCall = false;

1193

1195 PtrArgs.push_back(LI->getPointerOperand());

1197 PtrArgs.push_back(SI->getPointerOperand());

1199 PtrArgs.push_back(VAAI->getPointerOperand());

1201 PtrArgs.push_back(CXI->getPointerOperand());

1203 PtrArgs.push_back(RMWI->getPointerOperand());

1205

1206

1207

1208 if (Call->doesNotAccessMemory())

1209 continue;

1210

1211 IsFuncCall = true;

1212 if (CalleeAAR) {

1214

1215

1217 continue;

1218

1220 IsArgMemOnlyCall = true;

1221 }

1222

1223 for (Value *Arg : Call->args()) {

1224

1225

1226

1227 if (!Arg->getType()->isPointerTy())

1228 continue;

1229

1231 }

1232 }

1233

1234

1235

1236

1237

1238 if (PtrArgs.empty() && !IsFuncCall)

1239 continue;

1240

1241

1242

1243

1246

1247 for (const Value *V : PtrArgs) {

1250

1252 }

1253

1254

1255

1256 bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,

1257 UsesUnknownObject = false;

1258 for (const Value *V : ObjSet) {

1259

1260

1261

1265 if (IsNonPtrConst)

1266 continue;

1267

1268

1269

1270

1272 if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))

1273 UsesAliasingPtr = true;

1274 } else {

1275 UsesAliasingPtr = true;

1276 }

1277

1279

1280

1281 RequiresNoCaptureBefore = true;

1283

1284

1285

1286

1287 UsesUnknownObject = true;

1288 }

1289 }

1290

1291

1292

1293 if (UsesUnknownObject)

1294 continue;

1295

1296

1297

1298 if (IsFuncCall && !IsArgMemOnlyCall)

1299 RequiresNoCaptureBefore = true;

1300

1301

1302

1303

1304

1305

1306

1307

1308

1309 for (const Argument *A : NoAliasArgs) {

1311 continue;

1312

1313

1314

1315

1316

1317 if (!RequiresNoCaptureBefore ||

1319 A, false, I, &DT, false,

1322 }

1323

1325 NI->setMetadata(LLVMContext::MD_noalias,

1327 NI->getMetadata(LLVMContext::MD_noalias),

1329

1330

1331

1332

1333

1334

1335

1336

1337

1338

1339

1340 bool CanAddScopes = !UsesAliasingPtr;

1341 if (CanAddScopes && IsFuncCall)

1342 CanAddScopes = IsArgMemOnlyCall;

1343

1344 if (CanAddScopes)

1345 for (const Argument *A : NoAliasArgs) {

1346 if (ObjSet.count(A))

1347 Scopes.push_back(NewScopes[A]);

1348 }

1349

1350 if (!Scopes.empty())

1352 LLVMContext::MD_alias_scope,

1355 }

1356 }

1357}

1358

1361

1363 "Expected to be in same basic block!");

1365 assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");

1368}

1369

1370

1371

1376 auto &Context = CalledFunction->getContext();

1377

1378

1380 bool HasAttrToPropagate = false;

1381

1382

1383

1384

1385

1386

1388 Attribute::Dereferenceable, Attribute::DereferenceableOrNull,

1389 Attribute::NonNull, Attribute::NoFPClass,

1390 Attribute::Alignment, Attribute::Range};

1391

1392 for (unsigned I = 0, E = CB.arg_size(); I < E; ++I) {

1395

1396

1398 ValidObjParamAttrs.back().addAttribute(Attribute::ReadNone);

1400 ValidObjParamAttrs.back().addAttribute(Attribute::ReadOnly);

1401

1405 ValidExactParamAttrs.back().addAttribute(Attr);

1406 }

1407

1408 HasAttrToPropagate |= ValidObjParamAttrs.back().hasAttributes();

1409 HasAttrToPropagate |= ValidExactParamAttrs.back().hasAttributes();

1410 }

1411

1412

1413 if (!HasAttrToPropagate)

1414 return;

1415

1416 for (BasicBlock &BB : *CalledFunction) {

1419 if (!InnerCB)

1420 continue;

1422 if (!NewInnerCB)

1423 continue;

1424

1425

1426 if (InlinedFunctionInfo.isSimplified(InnerCB, NewInnerCB))

1427 continue;

1428

1429 AttributeList AL = NewInnerCB->getAttributes();

1430 for (unsigned I = 0, E = InnerCB->arg_size(); I < E; ++I) {

1431

1432

1433

1434

1435 if (NewInnerCB->paramHasAttr(I, Attribute::ByVal))

1436 continue;

1437

1438

1439 if (match(NewInnerCB->getArgOperand(I),

1441 continue;

1442

1443

1445 unsigned ArgNo;

1446 if (Arg) {

1448

1449

1450

1451

1452

1453 AttrBuilder NewAB{

1454 Context, AttributeSet::get(Context, ValidExactParamAttrs[ArgNo])};

1455 if (AL.getParamDereferenceableBytes(I) >

1456 NewAB.getDereferenceableBytes())

1457 NewAB.removeAttribute(Attribute::Dereferenceable);

1458 if (AL.getParamDereferenceableOrNullBytes(I) >

1459 NewAB.getDereferenceableOrNullBytes())

1460 NewAB.removeAttribute(Attribute::DereferenceableOrNull);

1461 if (AL.getParamAlignment(I).valueOrOne() >

1462 NewAB.getAlignment().valueOrOne())

1463 NewAB.removeAttribute(Attribute::Alignment);

1464 if (auto ExistingRange = AL.getParamRange(I)) {

1465 if (auto NewRange = NewAB.getRange()) {

1468 NewAB.removeAttribute(Attribute::Range);

1469 NewAB.addRangeAttr(CombinedRange);

1470 }

1471 }

1472

1473 if (FPClassTest ExistingNoFP = AL.getParamNoFPClass(I))

1474 NewAB.addNoFPClassAttr(ExistingNoFP | NewAB.getNoFPClass());

1475

1476 AL = AL.addParamAttributes(Context, I, NewAB);

1477 } else if (NewInnerCB->getArgOperand(I)->getType()->isPointerTy()) {

1478

1479 const Value *UnderlyingV =

1482 if (!Arg)

1483 continue;

1485 } else {

1486 continue;

1487 }

1488

1489

1490 AL = AL.addParamAttributes(Context, I, ValidObjParamAttrs[ArgNo]);

1491

1492

1493

1494

1495

1496

1497 if (AL.hasParamAttr(I, Attribute::ReadOnly) &&

1498 AL.hasParamAttr(I, Attribute::WriteOnly))

1499 AL = AL.addParamAttribute(Context, I, Attribute::ReadNone);

1500

1501

1502 if (AL.hasParamAttr(I, Attribute::ReadNone)) {

1503 AL = AL.removeParamAttribute(Context, I, Attribute::ReadOnly);

1504 AL = AL.removeParamAttribute(Context, I, Attribute::WriteOnly);

1505 }

1506

1507

1508 if (AL.hasParamAttr(I, Attribute::ReadOnly) ||

1509 AL.hasParamAttr(I, Attribute::ReadNone))

1510 AL = AL.removeParamAttribute(Context, I, Attribute::Writable);

1511 }

1512 NewInnerCB->setAttributes(AL);

1513 }

1514 }

1515}

1516

1517

1518

1519

1520

1521

1522

1526 Valid.addDereferenceableAttr(DerefBytes);

1528 Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);

1529 if (CB.hasRetAttr(Attribute::NoAlias))

1530 Valid.addAttribute(Attribute::NoAlias);

1531 if (CB.hasRetAttr(Attribute::NoUndef))

1532 Valid.addAttribute(Attribute::NoUndef);

1533 return Valid;

1534}

1535

1536

1537

1540 if (CB.hasRetAttr(Attribute::NonNull))

1541 Valid.addAttribute(Attribute::NonNull);

1542 if (CB.hasRetAttr(Attribute::Alignment))

1543 Valid.addAlignmentAttr(CB.getRetAlign());

1544 if (std::optional Range = CB.getRange())

1545 Valid.addRangeAttr(*Range);

1546 return Valid;

1547}

1548

1553 if (!ValidUB.hasAttributes() && !ValidPG.hasAttributes())

1554 return;

1556 auto &Context = CalledFunction->getContext();

1557

1558 for (auto &BB : *CalledFunction) {

1561 continue;

1563

1564

1565

1567 if (!NewRetVal)

1568 continue;

1569

1570

1571

1572 if (InlinedFunctionInfo.isSimplified(RetVal, NewRetVal))

1573 continue;

1574

1575

1576

1577

1578

1579

1580

1581

1582

1583

1584

1585

1586

1587

1588

1589

1590

1591

1592 if (RI->getParent() != RetVal->getParent() ||

1594 continue;

1595

1596

1597

1598

1599

1600

1601 AttributeList AL = NewRetVal->getAttributes();

1602 if (ValidUB.getDereferenceableBytes() < AL.getRetDereferenceableBytes())

1603 ValidUB.removeAttribute(Attribute::Dereferenceable);

1604 if (ValidUB.getDereferenceableOrNullBytes() <

1605 AL.getRetDereferenceableOrNullBytes())

1606 ValidUB.removeAttribute(Attribute::DereferenceableOrNull);

1607 AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);

1608

1609

1610

1611

1612

1613

1614

1615

1616

1617

1618

1619

1620

1621

1622

1623

1624

1625

1626

1627

1628

1629

1630

1631

1632

1633

1634

1635

1636

1637

1638

1639

1640 if (ValidPG.getAlignment().valueOrOne() < AL.getRetAlignment().valueOrOne())

1641 ValidPG.removeAttribute(Attribute::Alignment);

1642 if (ValidPG.hasAttributes()) {

1643 Attribute CBRange = ValidPG.getAttribute(Attribute::Range);

1644 if (CBRange.isValid()) {

1645 Attribute NewRange = AL.getRetAttr(Attribute::Range);

1646 if (NewRange.isValid()) {

1647 ValidPG.addRangeAttr(

1649 }

1650 }

1651

1652

1653

1654

1655

1656

1657

1658

1659

1660

1661

1662 if (CB.hasRetAttr(Attribute::NoUndef) ||

1663 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))

1664 NewAL = NewAL.addRetAttributes(Context, ValidPG);

1665 }

1666 NewRetVal->setAttributes(NewAL);

1667 }

1668}

1669

1670

1671

1674 return;

1675

1678

1679

1680

1682 bool DTCalculated = false;

1683

1686 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||

1687 Arg.use_empty())

1688 continue;

1689 MaybeAlign Alignment = Arg.getParamAlign();

1690 if (!Alignment)

1691 continue;

1692

1693 if (!DTCalculated) {

1695 DTCalculated = true;

1696 }

1697

1698

1701 continue;

1702

1704 DL, ArgVal, Alignment->value());

1706 }

1707}

1708

1715

1717 Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

1718

1719 Align DstAlign = Dst->getPointerAlignment(M->getDataLayout());

1720

1721

1722 CallInst *CI = Builder.CreateMemCpy(Dst, DstAlign, Src, SrcAlign, Size);

1723

1724

1725

1726

1730}

1731

1732

1733

1740 const DataLayout &DL = Caller->getDataLayout();

1741

1742

1743

1744

1746

1747

1748

1749 if (ByValAlignment.valueOrOne() == 1)

1750 return Arg;

1751

1754

1755

1756

1758 *ByValAlignment)

1759 return Arg;

1760

1761

1762

1763 }

1764

1765

1766 Align Alignment = DL.getPrefTypeAlign(ByValType);

1767

1768

1769

1770

1771 if (ByValAlignment)

1772 Alignment = std::max(Alignment, *ByValAlignment);

1773

1776 nullptr, Alignment, Arg->getName());

1778 NewAlloca->insertBefore(Caller->begin()->begin());

1780

1781

1782

1783 return NewAlloca;

1784}

1785

1786

1790 return true;

1791 return false;

1792}

1793

1794

1795

1798 Type *Int8PtrTy =

1799 PointerType::get(Ty->getContext(), Ty->getPointerAddressSpace());

1800 if (Ty == Int8PtrTy)

1802

1803

1805 if (U->getType() != Int8PtrTy) continue;

1806 if (U->stripPointerCasts() != AI) continue;

1808 return true;

1809 }

1810 return false;

1811}

1812

1813

1814

1815

1819

1820

1821

1830

1831

1832

1834 Instruction *TheCall, bool CalleeHasDebugInfo) {

1836 return;

1837

1838

1839

1840

1841

1842

1844

1846 DILocation *InlinedAtNode = TheCallDL;

1847

1848

1849

1851 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),

1852 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

1853

1854

1855

1856

1858

1859

1860

1861 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

1862

1863

1865

1866

1867 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,

1871 return MD;

1872 };

1874

1875 if (!NoInlineLineTables)

1879 I.setDebugLoc(IDL);

1880 return;

1881 }

1882

1883 if (CalleeHasDebugInfo && !NoInlineLineTables)

1884 return;

1885

1886

1887

1888

1889

1890

1891

1892

1895 return;

1896

1897

1898

1899

1901 return;

1902

1903 I.setDebugLoc(TheCallDL);

1904 };

1905

1906

1907 auto UpdateDVR = [&](DbgRecord *DVR) {

1908 assert(DVR->getDebugLoc() && "Debug Value must have debug loc");

1909 if (NoInlineLineTables) {

1910 DVR->setDebugLoc(TheCallDL);

1911 return;

1912 }

1916 DVR->getMarker()->getParent()->getContext(), IANodes);

1917 DVR->setDebugLoc(IDL);

1918 };

1919

1920

1921 for (; FI != Fn->end(); ++FI) {

1923 UpdateInst(I);

1924 for (DbgRecord &DVR : I.getDbgRecordRange()) {

1925 UpdateDVR(&DVR);

1926 }

1927 }

1928

1929

1930 if (NoInlineLineTables) {

1932 while (BI != FI->end()) {

1933 BI->dropDbgRecords();

1934 ++BI;

1935 }

1936 }

1937 }

1938}

1939

1940#undef DEBUG_TYPE

1941#define DEBUG_TYPE "assignment-tracking"

1942

1947

1949 errs() << "# Finding caller local variables escaped by callee\n");

1950 for (const Value *Arg : CB.args()) {

1952 if (!Arg->getType()->isPointerTy()) {

1954 continue;

1955 }

1956

1958 if (I) {

1959 LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");

1960 continue;

1961 }

1962

1963

1964 assert(Arg->getType()->isPtrOrPtrVectorTy());

1965 APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);

1967 Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));

1968 if (Base) {

1969 LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");

1970 continue;

1971 }

1972

1975

1977 continue;

1978

1979

1980 auto CollectAssignsForStorage = [&](DbgVariableRecord *DbgAssign) {

1981

1982 if (DbgAssign->getDebugLoc().getInlinedAt())

1983 return;

1984 LLVM_DEBUG(errs() << " > DEF : " << *DbgAssign << "\n");

1986 };

1988 }

1989 return EscapedLocals;

1990}

1991

1995 << Start->getParent()->getName() << " from "

1999}

2000

2001

2002

2003

2006

2007

2008 for (auto BBI = Start; BBI != End; ++BBI) {

2011 }

2012}

2013#undef DEBUG_TYPE

2014#define DEBUG_TYPE "inline-function"

2015

2016

2017

2018

2019

2020

2021

2026 const BasicBlock &CalleeEntryBlock) {

2028 for (auto Entry : VMap) {

2030 continue;

2034 if (!ClonedBBs.insert(ClonedBB).second) {

2035

2036

2037

2039 if (NewFreq > Freq)

2040 Freq = NewFreq;

2041 }

2043 }

2046 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);

2047}

2048

2049

2055 return;

2056 auto CallSiteCount =

2057 PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;

2058 int64_t CallCount =

2059 std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());

2061}

2062

2064 Function *Callee, int64_t EntryDelta,

2066 auto CalleeCount = Callee->getEntryCount();

2067 if (!CalleeCount)

2068 return;

2069

2070 const uint64_t PriorEntryCount = CalleeCount->getCount();

2071

2072

2073

2074 const uint64_t NewEntryCount =

2075 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)

2076 ? 0

2077 : PriorEntryCount + EntryDelta;

2078

2079 auto updateVTableProfWeight = [](CallBase *CB, const uint64_t NewEntryCount,

2080 const uint64_t PriorEntryCount) {

2082 if (VPtr)

2083 scaleProfData(*VPtr, NewEntryCount, PriorEntryCount);

2084 };

2085

2086

2087 if (VMap) {

2088 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;

2089 for (auto Entry : *VMap) {

2092 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);

2093 updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);

2094 }

2095

2098 II->updateProfWeight(CloneEntryCount, PriorEntryCount);

2099 updateVTableProfWeight(II, CloneEntryCount, PriorEntryCount);

2100 }

2101 }

2102 }

2103

2104 if (EntryDelta) {

2105 Callee->setEntryCount(NewEntryCount);

2106

2108

2109 if (!VMap || VMap->count(&BB))

2112 CI->updateProfWeight(NewEntryCount, PriorEntryCount);

2113 updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);

2114 }

2116 II->updateProfWeight(NewEntryCount, PriorEntryCount);

2117 updateVTableProfWeight(II, NewEntryCount, PriorEntryCount);

2118 }

2119 }

2120 }

2121}

2122

2123

2124

2125

2126

2127

2128

2129

2130

2131

2132

2133

2134

2135

2136

2137

2138

2139

2140static void

2145 IsUnsafeClaimRV = !IsRetainRV;

2146

2147 for (auto *RI : Returns) {

2149 bool InsertRetainCall = IsRetainRV;

2151

2152

2153

2157

2159 continue;

2160

2162 if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||

2163 II->use_empty() ||

2165 break;

2166

2167

2168

2169

2170

2171

2172 if (IsUnsafeClaimRV) {

2173 Builder.SetInsertPoint(II);

2174 Builder.CreateIntrinsic(Intrinsic::objc_release, RetOpnd);

2175 }

2176 II->eraseFromParent();

2177 InsertRetainCall = false;

2178 break;

2179 }

2180

2182

2183 if (!CI)

2184 break;

2185

2188 break;

2189

2190

2191

2196 NewCall->copyMetadata(*CI);

2197 CI->replaceAllUsesWith(NewCall);

2198 CI->eraseFromParent();

2199 InsertRetainCall = false;

2200 break;

2201 }

2202

2203 if (InsertRetainCall) {

2204

2205

2206

2207 Builder.SetInsertPoint(RI);

2208 Builder.CreateIntrinsic(Intrinsic::objc_retain, RetOpnd);

2209 }

2210 }

2211}

2212

2213

2214

2215

2216

2217

2218

2219

2220

2221

2222

2223

2224

2225

2226

2227

2228

2229

2230

2231

2232

2233

2234static std::pair<std::vector<int64_t>, std::vector<int64_t>>

2238

2239

2240

2241

2242 std::vector<int64_t> CalleeCounterMap;

2243 std::vector<int64_t> CalleeCallsiteMap;

2244 CalleeCounterMap.resize(CalleeCounters, -1);

2245 CalleeCallsiteMap.resize(CalleeCallsites, -1);

2246

2248 if (Ins.getNameValue() == &Caller)

2249 return false;

2250 const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());

2251 if (CalleeCounterMap[OldID] == -1)

2253 const auto NewID = static_cast<uint32_t>(CalleeCounterMap[OldID]);

2254

2255 Ins.setNameValue(&Caller);

2256 Ins.setIndex(NewID);

2257 return true;

2258 };

2259

2260 auto RewriteCallsiteInsIfNeeded = [&](InstrProfCallsite &Ins) -> bool {

2261 if (Ins.getNameValue() == &Caller)

2262 return false;

2263 const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());

2264 if (CalleeCallsiteMap[OldID] == -1)

2266 const auto NewID = static_cast<uint32_t>(CalleeCallsiteMap[OldID]);

2267

2268 Ins.setNameValue(&Caller);

2269 Ins.setIndex(NewID);

2270 return true;

2271 };

2272

2273 std::deque<BasicBlock *> Worklist;

2275

2276

2277

2278

2279

2280

2281

2282

2283

2284

2285

2286

2287

2288

2289

2290 Worklist.push_back(StartBB);

2291 while (!Worklist.empty()) {

2292 auto *BB = Worklist.front();

2293 Worklist.pop_front();

2296 if (BBID) {

2297 Changed |= RewriteInstrIfNeeded(*BBID);

2298

2299

2300

2301 BBID->moveBefore(BB->getFirstInsertionPt());

2302 }

2306

2307

2308

2309

2310

2311

2314 Inc->eraseFromParent();

2315 } else {

2317 RewriteInstrIfNeeded(*Inc);

2318 }

2319 } else if (Inc != BBID) {

2320

2321

2322

2323

2324 Inc->eraseFromParent();

2326 }

2328 Changed |= RewriteCallsiteInsIfNeeded(*CS);

2329 }

2330 }

2333 if (Seen.insert(Succ).second)

2334 Worklist.push_back(Succ);

2335 }

2336

2338 "Counter index mapping should be either to -1 or to non-zero index, "

2339 "because the 0 "

2340 "index corresponds to the entry BB of the caller");

2342 "Callsite index mapping should be either to -1 or to non-zero index, "

2343 "because there should have been at least a callsite - the inlined one "

2344 "- which would have had a 0 index.");

2345

2346 return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};

2347}

2348

2349

2350

2351

2352

2353

2354

2355

2356

2357

2358

2359

2360

2361

2362

2365 bool MergeAttributes, AAResults *CalleeAAR, bool InsertLifetime,

2368 return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,

2369 ForwardVarArgsTo, ORE);

2370

2374

2375

2376

2377

2380 const auto CallsiteID =

2381 static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());

2382

2383 const auto NumCalleeCounters = CtxProf.getNumCounters(Callee);

2384 const auto NumCalleeCallsites = CtxProf.getNumCallsites(Callee);

2385

2386 auto Ret = InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,

2387 ForwardVarArgsTo, ORE);

2388 if (!Ret.isSuccess())

2389 return Ret;

2390

2391

2392

2393 CallsiteIDIns->eraseFromParent();

2394

2395

2396

2397

2398 const auto IndicesMaps = remapIndices(Caller, StartBB, CtxProf,

2399 NumCalleeCounters, NumCalleeCallsites);

2401

2404 const auto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;

2406 (Ctx.counters().size() +

2407 llvm::count_if(CalleeCounterMap, [](auto V) { return V != -1; }) ==

2408 NewCountersSize) &&

2409 "The caller's counters size should have grown by the number of new "

2410 "distinct counters inherited from the inlined callee.");

2411 Ctx.resizeCounters(NewCountersSize);

2412

2413

2414

2415 auto CSIt = Ctx.callsites().find(CallsiteID);

2416 if (CSIt == Ctx.callsites().end())

2417 return;

2418 auto CalleeCtxIt = CSIt->second.find(CalleeGUID);

2419

2420

2421 if (CalleeCtxIt == CSIt->second.end())

2422 return;

2423

2424

2425

2426 auto &CalleeCtx = CalleeCtxIt->second;

2427 assert(CalleeCtx.guid() == CalleeGUID);

2428

2429 for (auto I = 0U; I < CalleeCtx.counters().size(); ++I) {

2430 const int64_t NewIndex = CalleeCounterMap[I];

2431 if (NewIndex >= 0) {

2432 assert(NewIndex != 0 && "counter index mapping shouldn't happen to a 0 "

2433 "index, that's the caller's entry BB");

2434 Ctx.counters()[NewIndex] = CalleeCtx.counters()[I];

2435 }

2436 }

2437 for (auto &[I, OtherSet] : CalleeCtx.callsites()) {

2438 const int64_t NewCSIdx = CalleeCallsiteMap[I];

2439 if (NewCSIdx >= 0) {

2440 assert(NewCSIdx != 0 &&

2441 "callsite index mapping shouldn't happen to a 0 index, the "

2442 "caller must've had at least one callsite (with such an index)");

2443 Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));

2444 }

2445 }

2446

2447

2448

2449 auto Deleted = Ctx.callsites().erase(CallsiteID);

2452 };

2453 CtxProf.update(Updater, Caller);

2454 return Ret;

2455}

2456

2460

2461

2464

2465

2467

2469 if (!CalledFunc ||

2472

2473

2474

2479

2481 continue;

2482

2484 continue;

2486 continue;

2488 continue;

2491 continue;

2492 }

2493

2495 }

2496 }

2497

2498

2499

2500

2501

2502

2503

2504

2505

2506

2511 "convergent call needs convergencectrl operand");

2512 }

2513 }

2514

2517

2518

2519

2520

2521

2522 if (CalledFunc->hasGC()) {

2523 if (Caller->hasGC() && CalledFunc->getGC() != Caller->getGC())

2525 }

2526

2527

2528 Constant *CalledPersonality =

2531 : nullptr;

2532

2533

2534

2535

2536 Constant *CallerPersonality =

2537 Caller->hasPersonalityFn()

2538 ? Caller->getPersonalityFn()->stripPointerCasts()

2539 : nullptr;

2540 if (CalledPersonality) {

2541

2542

2543

2544

2545 if (CallerPersonality && CalledPersonality != CallerPersonality)

2547 }

2548

2549

2550

2551 if (CallerPersonality) {

2554 std::optional ParentFunclet =

2556 if (ParentFunclet)

2558

2559

2560

2563

2564

2566

2567

2568 for (const BasicBlock &CalledBB : *CalledFunc) {

2571 }

2572 }

2574

2575

2576 for (const BasicBlock &CalledBB : *CalledFunc) {

2577 if (CalledBB.isEHPad())

2579 }

2580 }

2581 }

2582 }

2583 }

2584

2586}

2587

2588

2589

2590

2591

2592

2593

2594

2595

2597 bool MergeAttributes, AAResults *CalleeAAR,

2598 bool InsertLifetime, Function *ForwardVarArgsTo,

2604 "CanInlineCallSite should have verified direct call to definition");

2605

2606

2607

2608 bool EHPadForCallUnwindsLocally = false;

2611 Value *CallSiteUnwindDestToken =

2613

2614 EHPadForCallUnwindsLocally =

2615 CallSiteUnwindDestToken &&

2617 }

2618

2619

2620

2622

2623

2624

2628

2629

2630

2631

2632

2633 if (CalledFunc->hasGC()) {

2634 if (!Caller->hasGC())

2635 Caller->setGC(CalledFunc->getGC());

2636 else {

2637 assert(CalledFunc->getGC() == Caller->getGC() &&

2638 "CanInlineCallSite should have verified compatible GCs");

2639 }

2640 }

2641

2643 Constant *CalledPersonality =

2645 if (!Caller->hasPersonalityFn()) {

2646 Caller->setPersonalityFn(CalledPersonality);

2647 } else

2648 assert(Caller->getPersonalityFn()->stripPointerCasts() ==

2649 CalledPersonality &&

2650 "CanInlineCallSite should have verified compatible personality");

2651 }

2652

2653 {

2655 struct ByValInit {

2660 };

2661

2662

2663

2665

2666

2667

2668

2669

2670

2671

2672 ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());

2673

2674 auto &DL = Caller->getDataLayout();

2675

2676

2677

2679 unsigned ArgNo = 0;

2681 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {

2682 Value *ActualArg = *AI;

2683

2684

2685

2686

2687

2690 &CB, CalledFunc, IFI,

2692 if (ActualArg != *AI)

2696 }

2697

2698 VMap[&*I] = ActualArg;

2699 }

2700

2701

2702

2703

2704

2706

2709

2710

2712

2713

2714

2715

2716

2718 false, Returns, ".i",

2719 &InlinedFunctionInfo);

2720

2721 FirstNewBlock = LastBlock; ++FirstNewBlock;

2722

2723

2727

2728

2729

2730

2731

2734

2736 CalledFunc->front());

2737

2741 }

2742

2743

2744 for (ByValInit &Init : ByValInits)

2746 Caller->getParent(), &*FirstNewBlock, IFI,

2747 CalledFunc);

2748

2749 std::optional ParentDeopt =

2751 if (ParentDeopt) {

2753

2756 if (!ICS)

2757 continue;

2758

2760

2762

2764 ++COBi) {

2767

2769 continue;

2770 }

2771

2772

2773

2774

2775

2776 std::vector<Value *> MergedDeoptArgs;

2777 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +

2778 ChildOB.Inputs.size());

2779

2782

2783 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));

2784 }

2785

2787

2788

2789

2791

2792 VH = nullptr;

2794 }

2795 }

2796

2797

2798

2799

2802

2804

2806

2807

2808

2810 }

2811

2812

2813 SAMetadataCloner.clone();

2814 SAMetadataCloner.remap(FirstNewBlock, Caller->end());

2815

2816

2818

2819

2820

2822

2823

2824

2826

2829

2830

2832

2833

2836 make_range(FirstNewBlock->getIterator(), Caller->end()))

2840 }

2841

2844 if (IntrinsicCall) {

2847 }

2848 }

2849

2850

2851

2852

2853

2854 {

2857 E = FirstNewBlock->end(); I != E; ) {

2859 if (!AI) continue;

2860

2861

2862

2865 continue;

2866 }

2867

2869 continue;

2870

2871

2873

2874

2875

2880 ++I;

2881 }

2882

2883

2884

2885

2886 I.setTailBit(true);

2887 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,

2889 }

2890 }

2891

2892

2893

2895

2902 }

2903

2904 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;

2908 CallSiteTailKind = CI->getTailCallKind();

2909

2910

2913

2914 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;

2915 ++BB) {

2918 if (!CI)

2919 continue;

2920

2921

2922

2923 if (!VarArgsToForward.empty() &&

2924 ((ForwardVarArgsTo &&

2927

2930 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {

2931 for (unsigned ArgNo = 0;

2933 ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

2934 }

2935

2936

2937 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());

2938 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),

2939 Attrs.getRetAttrs(), ArgAttrs);

2940

2942 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());

2950 CI = NewCI;

2951 }

2952

2954 InlinedDeoptimizeCalls |=

2955 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

2956

2957

2958

2959

2960

2961

2962

2963

2964

2965

2966

2967

2968

2969

2970

2971

2974 ChildTCK = std::min(CallSiteTailKind, ChildTCK);

2977

2978

2979

2980

2981

2984 }

2985 }

2986 }

2987

2988

2989

2990

2991

2992

2993 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&

2995 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());

2997

2999 continue;

3000

3001

3002

3004 continue;

3005

3008 continue;

3009

3012

3013

3014 if (InlinedMustTailCalls &&

3015 RI->getParent()->getTerminatingMustTailCall())

3016 continue;

3017 if (InlinedDeoptimizeCalls &&

3018 RI->getParent()->getTerminatingDeoptimizeCall())

3019 continue;

3021 }

3022 }

3023 }

3024

3025

3026

3028

3029 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())

3030 .CreateStackSave("savedstack");

3031

3032

3033

3035

3036

3037 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())

3038 continue;

3039 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())

3040 continue;

3041 IRBuilder<>(RI).CreateStackRestore(SavedPtr);

3042 }

3043 }

3044

3045

3046

3047

3048

3050 BasicBlock *UnwindDest = II->getUnwindDest();

3054 } else {

3056 }

3057 }

3058

3059

3060

3061

3064 E = Caller->end();

3065 BB != E; ++BB) {

3066

3068

3069

3070

3071

3072

3074 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)

3076

3078 if (I->isEHPad())

3079 continue;

3080

3084 } else {

3088 }

3089 }

3090 }

3091

3092 if (InlinedDeoptimizeCalls) {

3093

3094

3095

3096

3097

3098 if (Caller->getReturnType() == CB.getType()) {

3100 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;

3101 });

3102 } else {

3105 Caller->getParent(), Intrinsic::experimental_deoptimize,

3106 {Caller->getReturnType()});

3107

3109 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();

3110 if (!DeoptCall) {

3112 continue;

3113 }

3114

3115

3116

3117

3118

3119

3124

3126

3129 auto DeoptAttributes = DeoptCall->getAttributes();

3132 "Expected at least the deopt operand bundle");

3133

3136 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);

3140 Builder.CreateRetVoid();

3141 else

3142 Builder.CreateRet(NewDeoptCall);

3143

3144 NewDeoptCall->removeRetAttrs(AttributeFuncs::typeIncompatible(

3146 }

3147

3148

3149 std::swap(Returns, NormalReturns);

3150 }

3151 }

3152

3153

3154

3155

3156

3157 if (InlinedMustTailCalls) {

3158

3159 Type *NewRetTy = Caller->getReturnType();

3160 bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;

3161

3162

3165 CallInst *ReturnedMustTail =

3166 RI->getParent()->getTerminatingMustTailCall();

3167 if (!ReturnedMustTail) {

3169 continue;

3170 }

3171 if (!NeedBitCast)

3172 continue;

3173

3174

3178 if (OldCast)

3179 OldCast->eraseFromParent();

3180

3181

3183 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));

3184 }

3185

3186

3187 std::swap(Returns, NormalReturns);

3188 }

3189

3190

3191

3192

3193

3194

3195

3197

3199 make_range(FirstNewBlock->getIterator(), Caller->end()))

3205 }

3206

3207

3208

3209

3210 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {

3211

3212 OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),

3213 FirstNewBlock->end());

3214

3215 Caller->back().eraseFromParent();

3216

3217

3218

3222 }

3223

3224

3225

3228 if (&CB == R->getReturnValue())

3230 else

3232 }

3233

3235

3236

3237 Returns[0]->eraseFromParent();

3238

3239 if (MergeAttributes)

3240 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);

3241

3242

3243 return;

3244 }

3245

3246

3247

3248

3249

3250

3251

3253 BranchInst *CreatedBranchToNormalDest = nullptr;

3255

3256

3258

3260

3261

3262

3263

3264 AfterCallBB =

3266 CalledFunc->getName() + ".exit");

3267

3268 } else {

3269

3270

3271

3273 CalledFunc->getName() + ".exit");

3274 }

3275

3277

3280 }

3281

3282

3283

3284

3287 "splitBasicBlock broken!");

3289

3290

3291

3292

3293 Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,

3294 Caller->end());

3295

3296

3297

3299

3301 if (Returns.size() > 1) {

3302

3303

3306 PHI->insertBefore(AfterCallBB->begin());

3307

3308

3310 }

3311

3312

3313

3314 if (PHI) {

3317 "Ret value not consistent in function!");

3318 PHI->addIncoming(RI->getReturnValue(), RI->getParent());

3319 }

3320 }

3321

3322

3329 }

3330

3331

3332

3333

3334 if (CreatedBranchToNormalDest)

3336 } else if (!Returns.empty()) {

3337

3338

3340 if (&CB == Returns[0]->getReturnValue())

3342 else

3344 }

3345

3346

3347 BasicBlock *ReturnBB = Returns[0]->getParent();

3349

3350

3351

3352 AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);

3353

3354 if (CreatedBranchToNormalDest)

3356

3357

3358 Returns[0]->eraseFromParent();

3361

3362

3363

3364

3365 if (CreatedBranchToNormalDest)

3367

3368

3370 }

3371

3372

3374

3375

3376

3377 if (InlinedMustTailCalls && pred_empty(AfterCallBB))

3379

3380

3381

3384

3385

3386

3389

3390

3392

3393

3395

3396

3397

3398

3399 if (PHI) {

3402 auto &DL = Caller->getDataLayout();

3404 PHI->replaceAllUsesWith(V);

3405 PHI->eraseFromParent();

3406 }

3407 }

3408

3409 if (MergeAttributes)

3410 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);

3411}

3412

3414 bool MergeAttributes,

3416 bool InsertLifetime,

3420 if (Result.isSuccess()) {

3421 InlineFunctionImpl(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,

3422 ForwardVarArgsTo, ORE);

3423 }

3424

3425 return Result;

3426}

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

This file contains the simple types necessary to represent the attributes associated with functions a...

static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)

Update the PHI nodes in OrigBB to include the values coming from NewBB.

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)

This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...

This file contains the declarations for the subclasses of Constant, which represent the different fla...

This file defines the DenseMap class.

This file provides various utilities for inspecting and working with the control flow graph in LLVM I...

Module.h This file contains the declarations for the Module class.

static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)

Definition InlineFunction.cpp:1523

DenseMap< Instruction *, Value * > UnwindDestMemoTy

Definition InlineFunction.cpp:261

static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)

Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.

Definition InlineFunction.cpp:1943

static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)

Update inlined instructions' line numbers to to encode location where these instructions are inlined.

Definition InlineFunction.cpp:1833

static void removeCallsiteMetadata(CallBase *Call)

Definition InlineFunction.cpp:825

static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)

Given an EH pad, find where it unwinds.

Definition InlineFunction.cpp:417

static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap, OptimizationRemarkEmitter *ORE)

Definition InlineFunction.cpp:904

static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))

static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)

If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...

Definition InlineFunction.cpp:620

static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)

Helper for getUnwindDestToken that does the descendant-ward part of the search.

Definition InlineFunction.cpp:265

static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)

When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...

Definition InlineFunction.cpp:561

static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)

Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.

Definition InlineFunction.cpp:1822

static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))

static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)

When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...

Definition InlineFunction.cpp:937

static std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)

Definition InlineFunction.cpp:2235

static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)

Definition InlineFunction.cpp:1538

static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList, OptimizationRemarkEmitter *ORE)

Definition InlineFunction.cpp:829

static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)

Update the branch metadata for cloned call instructions.

Definition InlineFunction.cpp:2050

static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)

Update the block frequencies of the caller after a callee has been inlined.

Definition InlineFunction.cpp:2022

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)

Definition InlineFunction.cpp:1549

static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, MaybeAlign SrcAlign, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)

Definition InlineFunction.cpp:1709

static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)

Definition InlineFunction.cpp:1359

static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))

static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)

If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...

Definition InlineFunction.cpp:1116

static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)

Definition InlineFunction.cpp:185

static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)

If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...

Definition InlineFunction.cpp:677

static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)

An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...

Definition InlineFunction.cpp:2141

static void fixupAssignments(Function::iterator Start, Function::iterator End)

Update inlined instructions' DIAssignID metadata.

Definition InlineFunction.cpp:2004

static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD, OptimizationRemarkEmitter *ORE)

Definition InlineFunction.cpp:849

static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)

Return the result of AI->isStaticAlloca() if AI were moved to the entry block.

Definition InlineFunction.cpp:1816

static bool isUsedByLifetimeMarker(Value *V)

Definition InlineFunction.cpp:1787

static void removeMemProfMetadata(CallBase *Call)

Definition InlineFunction.cpp:821

static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)

When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...

Definition InlineFunction.cpp:1734

static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)

If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...

Definition InlineFunction.cpp:1672

static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)

Definition InlineFunction.cpp:1992

static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))

static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)

Definition InlineFunction.cpp:1372

static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)

Definition InlineFunction.cpp:800

static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)

Bundle operands of the inlined function must be added to inlined call sites.

Definition InlineFunction.cpp:978

static bool hasLifetimeMarkers(AllocaInst *AI)

Definition InlineFunction.cpp:1796

static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)

Return the first DebugLoc that has line number information, given a range of instructions.

ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))

uint64_t IntrinsicInst * II

This file defines common analysis utilities used by the ObjC ARC Optimizer.

This file defines ARC utility functions which are used by various parts of the compiler.

This file contains the declarations for profiling metadata utility functions.

This file implements a set that has insertion order iteration characteristics.

This file defines the SmallPtrSet class.

This file defines the SmallVector class.

static Value * getParentPad(Value *EHPad)

LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call)

Return the behavior of the given call site.

Class for arbitrary precision integers.

an instruction to allocate memory on the stack

bool isSwiftError() const

Return true if this alloca is used as a swifterror argument to a call.

PointerType * getType() const

Overload to return most specific pointer type.

bool isUsedWithInAlloca() const

Return true if this alloca is used as an inalloca argument to a call.

LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const

Get allocation size in bytes.

const Value * getArraySize() const

Get the number of elements allocated.

This class represents an incoming formal argument to a Function.

unsigned getArgNo() const

Return the index of this formal argument in its containing function.

static LLVM_ABI uint64_t getGUID(const Function &F)

A cache of @llvm.assume calls within a function.

LLVM_ABI void registerAssumption(AssumeInst *CI)

Add an @llvm.assume intrinsic to this function's cache.

An instruction that atomically checks whether a specified value is in a memory location,...

an instruction that atomically reads a memory location, combines it with another value,...

static LLVM_ABI AttributeSet get(LLVMContext &C, const AttrBuilder &B)

Functions, function parameters, and return types can have attributes to indicate how they should be t...

LLVM_ABI const ConstantRange & getRange() const

Returns the value of the range attribute.

AttrKind

This enumeration lists the attributes that can be associated with parameters, function results,...

bool isValid() const

Return true if the attribute is any kind of attribute.

LLVM Basic Block Representation.

iterator begin()

Instruction iterator methods.

iterator_range< const_phi_iterator > phis() const

Returns a range that iterates over the phis in the basic block.

const Function * getParent() const

Return the enclosing method, or null if none.

LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const

Returns an iterator to the first instruction in this block that is not a PHINode instruction.

LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)

Split the basic block into two basic blocks at the specified instruction.

LLVM_ABI SymbolTableList< BasicBlock >::iterator eraseFromParent()

Unlink 'this' from the containing function and delete it.

InstListType::iterator iterator

Instruction iterators...

const Instruction * getTerminator() const LLVM_READONLY

Returns the terminator instruction if the block is well formed or null if the block is not well forme...

void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)

Transfer all instructions from FromBB to this basic block at ToIt.

LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)

Update PHI nodes in this BasicBlock before removal of predecessor Pred.

BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...

LLVM_ABI void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)

LLVM_ABI void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)

Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...

LLVM_ABI BlockFrequency getBlockFreq(const BasicBlock *BB) const

getblockFreq - Return block frequency.

Conditional or Unconditional Branch instruction.

static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

void setCallingConv(CallingConv::ID CC)

MaybeAlign getRetAlign() const

Extract the alignment of the return value.

LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const

Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.

OperandBundleUse getOperandBundleAt(unsigned Index) const

Return the operand bundle at a specific index.

std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const

Return an operand bundle by name, if present.

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signa...

void removeRetAttrs(const AttributeMask &AttrsToRemove)

Removes the attributes from the return value.

bool hasRetAttr(Attribute::AttrKind Kind) const

Determine whether the return value has the given attribute.

unsigned getNumOperandBundles() const

Return the number of operand bundles associated with this User.

CallingConv::ID getCallingConv() const

LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Get the attribute of a given kind from a given arg.

bool isByValArgument(unsigned ArgNo) const

Determine whether this argument is passed by value.

static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)

Create a clone of CB with operand bundle OB added.

MaybeAlign getParamAlign(unsigned ArgNo) const

Extract the alignment for a call or parameter (0=unknown).

AttributeSet getRetAttributes() const

Return the return attributes for this call.

Type * getParamByValType(unsigned ArgNo) const

Extract the byval type for a call or parameter.

Value * getCalledOperand() const

void setAttributes(AttributeList A)

Set the attributes for this call.

LLVM_ABI std::optional< ConstantRange > getRange() const

If this return value has a range attribute, return the value range of the argument.

bool doesNotThrow() const

Determine if the call cannot unwind.

Value * getArgOperand(unsigned i) const

uint64_t getRetDereferenceableBytes() const

Extract the number of dereferenceable bytes for a call or parameter (0=unknown).

bool isConvergent() const

Determine if the invoke is convergent.

FunctionType * getFunctionType() const

static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)

Create a clone of CB with a different set of operand bundles and insert it before InsertPt.

uint64_t getRetDereferenceableOrNullBytes() const

Extract the number of dereferenceable_or_null bytes for a call (0=unknown).

iterator_range< User::op_iterator > args()

Iteration adapter for range-for loops.

unsigned arg_size() const

AttributeList getAttributes() const

Return the attributes for this call.

bool hasOperandBundles() const

Return true if this User has any operand bundles.

LLVM_ABI Function * getCaller()

Helper to get the caller (the parent function).

This class represents a function call, abstracting a target machine's calling convention.

void setTailCallKind(TailCallKind TCK)

TailCallKind getTailCallKind() const

static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

bool isMustTailCall() const

static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)

This class represents a range of values.

LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const

Return the range that results from the intersection of this range with another range.

static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)

Return the ConstantTokenNone.

This is an important base class in LLVM.

const Constant * stripPointerCasts() const

static LLVM_ABI InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)

Get the instruction instrumenting a BB, or nullptr if not present.

static LLVM_ABI InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)

Get the instruction instrumenting a callsite, or nullptr if that cannot be found.

const DILocation * getWithoutAtom() const

uint64_t getAtomGroup() const

uint8_t getAtomRank() const

Subprogram description. Uses SubclassData1.

A parsed version of the target data layout string in and methods for querying it.

Base class for non-instruction debug metadata records that have positions within IR.

Record of a variable value-assignment, aka a non instruction representation of the dbg....

static DebugLoc getCompilerGenerated()

LLVM_ABI unsigned getLine() const

LLVM_ABI DILocation * get() const

Get the underlying DILocation.

LLVM_ABI MDNode * getScope() const

static LLVM_ABI DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)

Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-at the new call site.

static DebugLoc getTemporary()

LLVM_ABI unsigned getCol() const

LLVM_ABI bool isImplicitCode() const

Check if the DebugLoc corresponds to an implicit code.

static DebugLoc getUnknown()

ValueT lookup(const_arg_type_t< KeyT > Val) const

lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.

iterator find(const_arg_type_t< KeyT > Val)

size_type count(const_arg_type_t< KeyT > Val) const

Return 1 if the specified key is in the map, 0 otherwise.

std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)

Implements a dense probed hash-table based set.

void recalculate(ParentType &Func)

recalculate - compute a dominator tree for the given function

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

unsigned getNumParams() const

Return the number of fixed parameters this function type requires.

Class to represent profile counts.

uint64_t getCount() const

const BasicBlock & getEntryBlock() const

BasicBlockListType::iterator iterator

FunctionType * getFunctionType() const

Returns the FunctionType for me.

const BasicBlock & front() const

iterator_range< arg_iterator > args()

DISubprogram * getSubprogram() const

Get the attached subprogram.

bool hasGC() const

hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generation.

CallingConv::ID getCallingConv() const

getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.

bool hasPersonalityFn() const

Check whether this function has a personality function.

Constant * getPersonalityFn() const

Get the personality function associated with this function.

bool isIntrinsic() const

isIntrinsic - Returns true if the function's name starts with "llvm.".

MaybeAlign getParamAlign(unsigned ArgNo) const

LLVMContext & getContext() const

getContext - Return a reference to the LLVMContext associated with this function.

const std::string & getGC() const

std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const

Get the entry count for this function.

Type * getReturnType() const

Returns the type of the ret val.

void setCallingConv(CallingConv::ID CC)

bool onlyReadsMemory() const

Determine if the function does not access or only reads memory.

bool hasFnAttribute(Attribute::AttrKind Kind) const

Return true if the function has the attribute.

LLVM_ABI bool isDeclaration() const

Return true if the primary definition of this global value is outside of the current translation unit.

LLVM_ABI CallInst * CreateLifetimeStart(Value *Ptr)

Create a lifetime.start intrinsic.

This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.

This class captures the data input to the InlineFunction call, and records the auxiliary results produced by it.

Value * ConvergenceControlToken

bool UpdateProfile

Update profile for callee as well as cloned version.

Instruction * CallSiteEHPad

function_ref< AssumptionCache &(Function &)> GetAssumptionCache

If non-null, InlineFunction will update the callgraph to reflect the changes it makes.

BlockFrequencyInfo * CalleeBFI

SmallVector< AllocaInst *, 4 > StaticAllocas

InlineFunction fills this in with all static allocas that get copied into the caller.

BlockFrequencyInfo * CallerBFI

SmallVector< CallBase *, 8 > InlinedCallSites

All of the new call sites inlined into the caller.

InlineResult is basically true or false.

static InlineResult success()

static InlineResult failure(const char *Reason)

This represents the llvm.instrprof.callsite intrinsic.

This represents the llvm.instrprof.increment intrinsic.

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

bool hasMetadata() const

Return true if this instruction has any metadata attached to it.

LLVM_ABI void insertBefore(InstListType::iterator InsertPos)

Insert an unlinked instruction into a basic block immediately before the specified position.

LLVM_ABI InstListType::iterator eraseFromParent()

This method unlinks 'this' from the containing basic block and deletes it.

LLVM_ABI const Function * getFunction() const

Return the function this instruction belongs to.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)

Set the metadata of the specified kind to the specified node.

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

void setDebugLoc(DebugLoc Loc)

Set the debug location information for this instruction.

LLVM_ABI const DataLayout & getDataLayout() const

Get the data layout of the module this instruction belongs to.

A wrapper class for inspecting calls to intrinsic functions.

static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)

Check if the intrinsic might lower into a regular function call in the course of IR transformations.

This is an important class for using LLVM in a threaded context.

@ OB_clang_arc_attachedcall

The landingpad instruction holds all of the information necessary to generate correct exception handl...

bool isCleanup() const

Return 'true' if this landingpad instruction is a cleanup.

unsigned getNumClauses() const

Get the number of clauses for this landing pad.

Constant * getClause(unsigned Idx) const

Get the value of the clause at index Idx.

An instruction for reading from memory.

MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())

Return metadata appropriate for an alias scope root node.

MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())

Return metadata appropriate for an alias scope domain node.

static MDTuple * getDistinct(LLVMContext &Context, ArrayRef< Metadata * > MDs)

void replaceAllUsesWith(Metadata *MD)

RAUW a temporary.

static LLVM_ABI MDNode * concatenate(MDNode *A, MDNode *B)

Methods for metadata merging.

ArrayRef< MDOperand > operands() const

op_iterator op_end() const

static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)

unsigned getNumOperands() const

Return number of MDNode operands.

op_iterator op_begin() const

LLVMContext & getContext() const

static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)

Return a temporary node.

bool onlyAccessesInaccessibleMem() const

Whether this function only (at most) accesses inaccessible memory.

bool onlyAccessesArgPointees() const

Whether this function only (at most) accesses argument memory.

A Module instance is used to store all the information related to an LLVM module.

The instrumented contextual profile, produced by the CtxProfAnalysis.

LLVM_ABI bool isInSpecializedModule() const

LLVM_ABI void update(Visitor, const Function &F)

uint32_t getNumCounters(const Function &F) const

uint32_t allocateNextCounterIndex(const Function &F)

uint32_t getNumCallsites(const Function &F) const

uint32_t allocateNextCallsiteIndex(const Function &F)

A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.

void addIncoming(Value *V, BasicBlock *BB)

Add an incoming value to the end of the PHI list.

static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).

static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)

This constructs a pointer to an object of the specified type in a numbered address space.

static LLVM_ABI PoisonValue * get(Type *T)

Static factory methods - Return an 'poison' object of the specified type.

Analysis providing profile information.

LLVM_ABI std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const

Returns the profile count for CallInst.

Resume the propagation of an exception.

Return a value (possibly void), from a function.

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

void insert_range(Range &&R)

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

bool contains(ConstPtrType Ptr) const

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

reference emplace_back(ArgTypes &&... Args)

void reserve(size_type N)

void append(ItTy in_start, ItTy in_end)

Add the specified range to the end of the SmallVector.

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM_ABI unsigned getPointerAddressSpace() const

Get the address space of this pointer or pointer vector type.

bool isVoidTy() const

Return true if this is 'void'.

void setOperand(unsigned i, Value *Val)

Value * getOperand(unsigned i) const

This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...

ValueT lookup(const KeyT &Val) const

lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.

size_type count(const KeyT &Val) const

Return 1 if the specified key is in the map, 0 otherwise.

ValueMapIteratorImpl< MapT, const Value *, false > iterator

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

LLVM_ABI void replaceAllUsesWith(Value *V)

Change all uses of this to point to a new Value.

iterator_range< user_iterator > users()

LLVM_ABI LLVMContext & getContext() const

All values hold a context through their type.

LLVM_ABI StringRef getName() const

Return a constant reference to the value's name.

LLVM_ABI void takeName(Value *V)

Transfer the name from V to this value.

std::pair< iterator, bool > insert(const ValueT &V)

const ParentTy * getParent() const

self_iterator getIterator()

Class to build a trie of call stack contexts for a particular profiled allocation call, along with their associated allocation types.

Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresponding ThinLTO summary data structures (CallsiteInfo and MIBInfo).

This provides a very simple, boring adaptor for a begin and end iterator into a range type.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

CallingConv Namespace - This namespace contains an enum with a value for the well-known calling conventions.

@ BasicBlock

Various leaf nodes.

LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})

Look up the Function declaration of the intrinsic id in the Module M.

bool match(Val *V, const Pattern &P)

match_immconstant_ty m_ImmConstant()

Match an arbitrary immediate Constant and ignore it.

LLVM_ABI void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)

Track assignments to Vars between Start and End.

LLVM_ABI void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)

Replace DIAssignID uses and attachments with IDs from Map.

SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)

Return a range of dbg_assign records for which Inst performs the assignment they encode.

DenseMap< const AllocaInst *, SmallSetVector< VarRecord, 2 > > StorageToVarsMap

Map of backing storage to a set of variables that are stored to it.

initializer< Ty > init(const Ty &Val)

std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)

Extract a Value from Metadata, if any.

LLVM_ABI MDNode * getMIBStackNode(const MDNode *MIB)

Returns the stack node from an MIB metadata node.

ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)

This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedcall.

ARCInstKind

Equivalence classes of instructions in the ARC Model.

@ None

anything that is inert from an ARC perspective.

@ RetainRV

objc_retainAutoreleasedReturnValue

std::optional< Function * > getAttachedARCFunction(const CallBase *CB)

This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the ARC runtime function.

bool isRetainOrClaimRV(ARCInstKind Kind)

Check whether the function is retainRV/unsafeClaimRV.

const Value * GetRCIdentityRoot(const Value *V)

The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equivalent to retaining or releasing V.

bool hasAttachedCallOpBundle(const CallBase *CB)

This is an optimization pass for GlobalISel generic memory operations.

UnaryFunction for_each(R &&Range, UnaryFunction F)

Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.

auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)

Get the size of a range.

LLVM_ABI BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)

Convert the CallInst to InvokeInst with the specified unwind edge basic block.

LLVM_ABI InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)

This function inlines the called function into the basic block of the caller.

Definition InlineFunction.cpp:3413

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

auto successors(const MachineBasicBlock *BB)

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

void append_range(Container &C, Range &&R)

Wrapper function to append range R to container C.

iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)

Make a range that does early increment to allow mutation of the underlying range without disrupting iterators.

std::string utostr(uint64_t X, bool isNeg=false)

MemoryEffectsBase< IRMemLocation > MemoryEffects

Summary of how a function affects memory in the program.

bool isa_and_nonnull(const Y &Val)

LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)

PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...

LLVM_ABI InlineResult CanInlineCallSite(const CallBase &CB, InlineFunctionInfo &IFI)

Check if it is legal to perform inlining of the function called by CB into the caller at this particu...

Definition InlineFunction.cpp:2457

bool isScopedEHPersonality(EHPersonality Pers)

Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.

LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)

See if we can compute a simplified version of this instruction.

auto dyn_cast_or_null(const Y &Val)

Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to infer an alignment for the specified pointer.

LLVM_ABI Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to ensure that the alignment of V is at least PrefAlign bytes.

FPClassTest

Floating-point class tests, supported by 'is_fpclass' intrinsic.

LLVM_ABI void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)

This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on the fly.

LLVM_ABI void InlineFunctionImpl(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)

This should generally not be used, use InlineFunction instead.

Definition InlineFunction.cpp:2596

Function::ProfileCount ProfileCount

LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)

See if the given exception handling personality function is one that we understand.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)

Insert an unreachable instruction before the specified instruction, making it and the rest of the code in the block dead.

LLVM_ABI raw_fd_ostream & errs()

This returns a reference to a raw_ostream for standard error.

IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >

LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)

Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert it before I.

LLVM_ABI void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)

Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite information by the new count divided by the old count.

Definition InlineFunction.cpp:2063

OperandBundleDefT< Value * > OperandBundleDef

LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)

Return true if assignment tracking is enabled for module M.

LLVM_ABI MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)

Compute the union of two access-group lists.

DWARFExpression::Operation Op

bool isAsynchronousEHPersonality(EHPersonality Pers)

Returns true if this personality function catches asynchronous exceptions.

ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy

LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)

Return true if this function can prove that the instruction I will always transfer execution to one of its successors (including the next instruction that follows within a basic block).

LLVM_ABI bool isEscapeSource(const Value *V)

Returns true if the pointer is one which would have been considered an escape by isNotCapturedBefore.

auto count_if(R &&Range, UnaryPredicate P)

Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

void erase_if(Container &C, UnaryPredicate P)

Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...

bool is_contained(R &&Range, const E &Element)

Returns true if Element is found in Range.

bool capturesAnything(CaptureComponents CC)

bool pred_empty(const BasicBlock *BB)

LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)

This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....

LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)

This method is similar to getUnderlyingObject except that it can look through phi and select instructions.

LLVM_ABI void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)

Update the debug locations contained within the MD_loop metadata attached to the instruction I,...

LLVM_ABI bool isIdentifiedObject(const Value *V)

Return true if this pointer refers to a distinct and identifiable object.

LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)

Scaling the profile data attached to 'I' using the ratio of S/T.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This struct is a compact representation of a valid (non-zero power of two) alignment.

This struct can be used to capture information about code being cloned, while it is being cloned.

bool ContainsDynamicAllocas

This is set to true if the cloned code contains a 'dynamic' alloca.

bool isSimplified(const Value *From, const Value *To) const

bool ContainsCalls

This is set to true if the cloned code contains a normal call instruction.

bool ContainsMemProfMetadata

This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned code.

std::vector< WeakTrackingVH > OperandBundleCallSites

All cloned call sites that have operand bundles attached are appended to this vector.

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

Align valueOrOne() const

For convenience, returns a valid alignment or 1 if undefined.

static Instruction * tryGetVTableInstruction(CallBase *CB)

Helper struct for trackAssignments, below.