LLVM: lib/CodeGen/GlobalISel/IRTranslator.cpp Source File

1//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ----------===//

2//

3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.

4// See https://llvm.org/LICENSE.txt for license information.

5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

6//

7//===----------------------------------------------------------------------===//

8///

9/// \file

10/// This file implements the IRTranslator class.

11//===----------------------------------------------------------------------===//

65#include "llvm/IR/IntrinsicsAMDGPU.h"

86#include <algorithm>

87#include <cassert>

88#include <cstdint>

89#include <iterator>

90#include <optional>

91#include <string>

92#include <utility>

93#include <vector>

94

95#define DEBUG_TYPE "irtranslator"

96

97using namespace llvm;

98

101 cl::desc("Should enable CSE in irtranslator"),

104

106 false, false)

114

119 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

120

121

122

123 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())

124 R << (" (in function: " + MF.getName() + ")").str();

125

126 if (TPC.isGlobalISelAbortEnabled())

128 else

129 ORE.emit(R);

130}

131

134

135#ifndef NDEBUG

136namespace {

137

138

141

142public:

143 DILocationVerifier() = default;

144 ~DILocationVerifier() = default;

145

146 const Instruction *getCurrentInst() const { return CurrInst; }

147 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

148

152

154 assert(getCurrentInst() && "Inserted instruction without a current MI");

155

156

157#ifndef NDEBUG

158 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst

159 << " was copied to " << MI);

160#endif

161

162

163

165 (MI.getParent()->isEntryBlock() && MI.getDebugLoc()) ||

166 (MI.isDebugInstr())) &&

167 "Line info was not transferred to all instructions");

168 }

169};

170}

171#endif

172

173

182 }

187}

188
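// allocateVRegs reserves value-map slots for Val without defining any
// registers: the value's type is split into leaf LLTs (recording byte
// offsets for aggregates) and one placeholder register is added per part.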

190IRTranslator::allocateVRegs(const Value &Val) {

191 auto VRegsIt = VMap.findVRegs(Val);

192 if (VRegsIt != VMap.vregs_end())

193 return *VRegsIt->second;

194 auto *Regs = VMap.getVRegs(Val);

195 auto *Offsets = VMap.getOffsets(Val);

198 Offsets->empty() ? Offsets : nullptr);

199 for (unsigned i = 0; i < SplitTys.size(); ++i)

200 Regs->push_back(0);

201 return *Regs;

202}

203

205 auto VRegsIt = VMap.findVRegs(Val);

206 if (VRegsIt != VMap.vregs_end())

207 return *VRegsIt->second;

208

210 return *VMap.getVRegs(Val);

211

212

213 auto *VRegs = VMap.getVRegs(Val);

214 auto *Offsets = VMap.getOffsets(Val);

215

218 "Don't know how to create an empty vreg");

219

222 Offsets->empty() ? Offsets : nullptr);

223

224 if (!isa<Constant>(Val)) {

225 for (auto Ty : SplitTys)

227 return *VRegs;

228 }

229

231

232 auto &C = cast<Constant>(Val);

233 unsigned Idx = 0;

234 while (auto Elt = C.getAggregateElement(Idx++)) {

235 auto EltRegs = getOrCreateVRegs(*Elt);

236 llvm::copy(EltRegs, std::back_inserter(*VRegs));

237 }

238 } else {

239 assert(SplitTys.size() == 1 && "unexpectedly split LLT");

241 bool Success = translate(cast<Constant>(Val), VRegs->front());

246 R << "unable to translate constant: " << ore::NV("Type", Val.getType());

248 return *VRegs;

249 }

250 }

251

252 return *VRegs;

253}

254
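// Frame indices for static allocas are created lazily and cached: the object
// size is the allocated element size scaled by the constant array size,
// clamped to at least one byte so the slot remains addressable.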

255int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {

256 auto MapEntry = FrameIndices.find(&AI);

257 if (MapEntry != FrameIndices.end())

258 return MapEntry->second;

259

262 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

263

264

265 Size = std::max<uint64_t>(Size, 1u);

266

267 int &FI = FrameIndices[&AI];

269 return FI;

270}

271

273 if (const StoreInst *SI = dyn_cast<StoreInst>(&I))

274 return SI->getAlign();

275 if (const LoadInst *LI = dyn_cast<LoadInst>(&I))

276 return LI->getAlign();

279 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))

281

283 R << "unable to translate memop: " << ore::NV("Opcode", &I);

286}

287

290 assert(MBB && "BasicBlock was not encountered before");

291 return *MBB;

292}

293

294void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {

295 assert(NewPred && "new predecessor must be a real MachineBasicBlock");

296 MachinePreds[Edge].push_back(NewPred);

297}

298

299bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,

301

302

303

304

305 Register Op0 = getOrCreateVReg(*U.getOperand(0));

306 Register Op1 = getOrCreateVReg(*U.getOperand(1));

307 Register Res = getOrCreateVReg(U);

309 if (isa<Instruction>(U)) {

312 }

313

315 return true;

316}

317

318bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,

320 Register Op0 = getOrCreateVReg(*U.getOperand(0));

321 Register Res = getOrCreateVReg(U);

323 if (isa<Instruction>(U)) {

326 }

328 return true;

329}

330

331bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {

332 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);

333}

334

335bool IRTranslator::translateCompare(const User &U,

337 auto *CI = cast<CmpInst>(&U);

338 Register Op0 = getOrCreateVReg(*U.getOperand(0));

339 Register Op1 = getOrCreateVReg(*U.getOperand(1));

340 Register Res = getOrCreateVReg(U);

344 MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);

351 else

352 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

353

354 return true;

355}

356

357bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {

358 const ReturnInst &RI = cast<ReturnInst>(U);

361 Ret = nullptr;

362

364 if (Ret)

365 VRegs = getOrCreateVRegs(*Ret);

366

371 }

372

373

374

375

376 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);

377}

378

379void IRTranslator::emitBranchForMergedCondition(

383

384

385 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {

387 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {

388 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();

389 } else {

391 Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();

392 }

393

395 BOp->getOperand(1), nullptr, TBB, FBB, CurBB,

396 CurBuilder->getDebugLoc(), TProb, FProb);

397 SL->SwitchCases.push_back(CB);

398 return;

399 }

400

401

405 nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);

406 SL->SwitchCases.push_back(CB);

407}

408

410 if (const Instruction *I = dyn_cast<Instruction>(V))

411 return I->getParent() == BB;

412 return true;

413}

414
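// findMergedConditions mirrors SelectionDAGBuilder::FindMergedConditions: it
// decomposes an and/or tree of i1 conditions into a chain of conditional
// branches, creating temporary blocks and splitting edge probabilities as
// described in the comments below.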

415void IRTranslator::findMergedConditions(

420 using namespace PatternMatch;

421 assert((Opc == Instruction::And || Opc == Instruction::Or) &&

422 "Expected Opc to be AND/OR");

423

424

428 findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,

429 !InvertCond);

430 return;

431 }

432

434 const Value *BOpOp0, *BOpOp1;

435

436

437

438

439

441 if (BOp) {

443 ? Instruction::And

445 ? Instruction::Or

447 if (InvertCond) {

448 if (BOpc == Instruction::And)

449 BOpc = Instruction::Or;

450 else if (BOpc == Instruction::Or)

451 BOpc = Instruction::And;

452 }

453 }

454

455

456

457 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();

461 emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,

462 InvertCond);

463 return;

464 }

465

466

471

472 if (Opc == Instruction::Or) {

473 // Codegen X | Y as:

474 // BB1:

475 //   jmp_if_X TBB

476 //   jmp TmpBB

477 // TmpBB:

478 //   jmp_if_Y TBB

479 //   jmp FBB

480 //

481

482 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. The

483 // requirement is that

484 //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)

485 //     = TrueProb for original BB.

486 // Assuming the original probabilities are A and B, one choice is to set

487 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to

488 // A/(1+B) and 2B/(1+B). This choice assumes that

489 //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.

490 // Another choice is to assume TrueProb for BB1 equals to TrueProb for

491 // TmpBB, but the math is more complicated.

492

493 auto NewTrueProb = TProb / 2;

494 auto NewFalseProb = TProb / 2 + FProb;

495

496 findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,

497 NewFalseProb, InvertCond);

498

499

502

503 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],

504 Probs[1], InvertCond);

505 } else {

506 assert(Opc == Instruction::And && "Unknown merge op!");

507 // Codegen X & Y as:

508 // BB1:

509 //   jmp_if_X TmpBB

510 //   jmp FBB

511 // TmpBB:

512 //   jmp_if_Y TBB

513 //   jmp FBB

514 //

515

516 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. The

517 // requirement is that

518 //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)

519 //     = FalseProb for original BB.

520 // Assuming the original probabilities are A and B, one choice is to set

521 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to

522 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==

523 // TrueProb for BB1 * FalseProb for TmpBB.

524

525

526 auto NewTrueProb = TProb + FProb / 2;

527 auto NewFalseProb = FProb / 2;

528

529 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,

530 NewFalseProb, InvertCond);

531

532

535

536 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],

537 Probs[1], InvertCond);

538 }

539}

540
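// If the two case blocks test the same operands (or the same value against
// null with matching predicates), they can be folded into one compare, so
// returning false tells the caller not to emit them as separate branches.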

541bool IRTranslator::shouldEmitAsBranches(

542 const std::vector<SwitchCG::CaseBlock> &Cases) {

543

544 if (Cases.size() != 2)

545 return true;

546

547

548

549 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&

550 Cases[0].CmpRHS == Cases[1].CmpRHS) ||

551 (Cases[0].CmpRHS == Cases[1].CmpLHS &&

552 Cases[0].CmpLHS == Cases[1].CmpRHS)) {

553 return false;

554 }

555

556

557

558 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&

559 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&

560 isa<Constant>(Cases[0].CmpRHS) &&

561 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {

563 Cases[0].TrueBB == Cases[1].ThisBB)

564 return false;

566 Cases[0].FalseBB == Cases[1].ThisBB)

567 return false;

568 }

569

570 return true;

571}

572
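// Branch translation: unconditional branches become (at most) a G_BR plus
// successor edges; conditional branches on an and/or tree are split through
// findMergedConditions, and the general case emits one CaseBlock comparing
// the condition against true.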

574 const BranchInst &BrInst = cast<BranchInst>(U);

575 auto &CurMBB = MIRBuilder.getMBB();

576 auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

577

579

581 !CurMBB.isLayoutSuccessor(Succ0MBB))

582 MIRBuilder.buildBr(*Succ0MBB);

583

584

586 CurMBB.addSuccessor(&getMBB(*Succ));

587 return true;

588 }

589

590

591

594

595 // If this is a series of conditions that are or'd or and'd together, emit

596 // this as a sequence of branches instead of setcc's with and/or operations.

597 // As long as jumps are not expensive (exceptions for multi-use logic ops,

598 // unpredictable branches, and vector extracts because those jumps are

599 // likely expensive for any target), this should improve performance.

600 // For example, instead of something like:

601 //     cmp A, B

602 //     C = seteq

603 //     cmp D, E

604 //     F = setle

605 //     or C, F

606 //     jnz foo

607 // Emit:

608 //     cmp A, B

609 //     je foo

610 //     cmp D, E

611 //     jle foo

612 using namespace PatternMatch;

613 const Instruction *CondI = dyn_cast<Instruction>(CondVal);

615 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {

618 const Value *BOp0, *BOp1;

620 Opcode = Instruction::And;

622 Opcode = Instruction::Or;

623

626 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,

627 getEdgeProbability(&CurMBB, Succ0MBB),

628 getEdgeProbability(&CurMBB, Succ1MBB),

629 false);

630 assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

631

632

633 if (shouldEmitAsBranches(SL->SwitchCases)) {

634

635 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);

636 SL->SwitchCases.erase(SL->SwitchCases.begin());

637 return true;

638 }

639

640

641

642 for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)

643 MF->erase(SL->SwitchCases[I].ThisBB);

644

645 SL->SwitchCases.clear();

646 }

647 }

648

649

652 nullptr, Succ0MBB, Succ1MBB, &CurMBB,

653 CurBuilder->getDebugLoc());

654

655

656

657 emitSwitchCase(CB, &CurMBB, *CurBuilder);

658 return true;

659}

660

664 if (!FuncInfo.BPI) {

665 Src->addSuccessorWithoutProb(Dst);

666 return;

667 }

669 Prob = getEdgeProbability(Src, Dst);

670 Src->addSuccessor(Dst, Prob);

671}

672

676 const BasicBlock *SrcBB = Src->getBasicBlock();

677 const BasicBlock *DstBB = Dst->getBasicBlock();

678 if (!FuncInfo.BPI) {

679

680

681 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);

683 }

685}

686
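// Switch lowering reuses the target-independent SwitchCG machinery: cases
// are clustered into ranges, jump tables, and bit tests, then each work-list
// item is either split (when large) or lowered directly.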

688 using namespace SwitchCG;

689

693 Clusters.reserve(SI.getNumCases());

694 for (const auto &I : SI.cases()) {

696 assert(Succ && "Could not find successor mbb in mapping");

697 const ConstantInt *CaseVal = I.getCaseValue();

701 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));

702 }

703

705

706

707

708

710

712

713

714 if (Clusters.empty()) {

716 if (DefaultMBB != SwitchMBB->getNextNode())

717 MIB.buildBr(*DefaultMBB);

718 return true;

719 }

720

721 SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);

722 SL->findBitTestClusters(Clusters, &SI);

723

725 dbgs() << "Case clusters: ";

726 for (const CaseCluster &C : Clusters) {

727 if (C.Kind == CC_JumpTable)

728 dbgs() << "JT:";

729 if (C.Kind == CC_BitTests)

730 dbgs() << "BT:";

731

732 C.Low->getValue().print(dbgs(), true);

733 if (C.Low != C.High) {

734 dbgs() << '-';

735 C.High->getValue().print(dbgs(), true);

736 }

737 dbgs() << ' ';

738 }

739 dbgs() << '\n';

740 });

741

742 assert(!Clusters.empty());

746 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);

747 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

748

749 while (!WorkList.empty()) {

750 SwitchWorkListItem W = WorkList.pop_back_val();

751

752 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

753

754 if (NumClusters > 3 &&

757 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);

758 continue;

759 }

760

761 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))

762 return false;

763 }

764 return true;

765}

766
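// splitWorkItem performs the binary split of a cluster range around a pivot
// chosen by SwitchLowering: each half either reuses its single target block
// or gets a fresh block re-queued on the work list, and a pivot comparison
// is emitted to select between the halves.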

771 using namespace SwitchCG;

772 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&

773 "Clusters not sorted?");

774 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

775

776 auto [LastLeft, FirstRight, LeftProb, RightProb] =

777 SL->computeSplitWorkItemInfo(W);

778

779

780

782 assert(PivotCluster > W.FirstCluster);

783 assert(PivotCluster <= W.LastCluster);

784

787

788 const ConstantInt *Pivot = PivotCluster->Low;

789

790

792 ++BBI;

793

794

795

796

798 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&

799 FirstLeft->Low == W.GE &&

800 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {

801 LeftMBB = FirstLeft->MBB;

802 } else {

804 FuncInfo.MF->insert(BBI, LeftMBB);

806 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});

807 }

808

809

810

811

813 if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&

814 (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {

815 RightMBB = FirstRight->MBB;

816 } else {

818 FuncInfo.MF->insert(BBI, RightMBB);

820 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});

821 }

822

823

825 LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,

826 RightProb);

827

828 if (W.MBB == SwitchMBB)

829 emitSwitchCase(CB, SwitchMBB, MIB);

830 else

831 SL->SwitchCases.push_back(CB);

832}

833
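// emitJumpTable only emits the indexed jump-table branch; the range check
// and index computation are emitted separately by emitJumpTableHeader, which
// must run first (hence the assertion on JT.Reg below).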

836

837 assert(JT.Reg && "Should lower JT Header first!");

840 MIB.setDebugLoc(CurBuilder->getDebugLoc());

841

844

847}

848

853 MIB.setMBB(*HeaderBB);

854 MIB.setDebugLoc(CurBuilder->getDebugLoc());

855

857

859 Register SwitchOpReg = getOrCreateVReg(SValue);

861 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

862

863

864

868

869 JT.Reg = Sub.getReg(0);

870

874 return true;

875 }

876

877

878

879

880 auto Cst = getOrCreateVReg(

884

886

887

890 return true;

891}

892
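// emitSwitchCase materializes one CaseBlock: either a direct compare against
// a constant, or a range check rewritten as an unsigned compare of
// (Value - Low) against (High - Low), followed by the two branches.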

901

903

911 return;

912 }

913

915

917 const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);

918

919

920

923 Cond = CondLHS;

924 } else {

929 else

932 }

933 } else {

935 "Can only handle SLE ranges");

936

937 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();

938 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

939

941 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {

945 } else {

946 const LLT CmpTy = MRI->getType(CmpOpReg);

947 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);

950 }

951 }

952

953

955

958

959

960

964

967

971}

972

982 bool FallthroughUnreachable) {

983 using namespace SwitchCG;

985

986 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;

989

990

992 CurMF->insert(BBI, JumpMBB);

993

994

995

996

998 CurMBB);

1000 JumpMBB);

1001

1002 auto JumpProb = I->Prob;

1003 auto FallthroughProb = UnhandledProbs;

1004

1005

1006

1007

1010 SI != SE; ++SI) {

1011 if (*SI == DefaultMBB) {

1012 JumpProb += DefaultProb / 2;

1013 FallthroughProb -= DefaultProb / 2;

1016 } else {

1017

1018 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},

1019 JumpMBB);

1020 }

1021 }

1022

1023 if (FallthroughUnreachable)

1024 JTH->FallthroughUnreachable = true;

1025

1026 if (!JTH->FallthroughUnreachable)

1027 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);

1028 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);

1030

1031

1032

1033 JTH->HeaderBB = CurMBB;

1034 JT->Default = Fallthrough;

1035

1036

1037 if (CurMBB == SwitchMBB) {

1038 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))

1039 return false;

1040 JTH->Emitted = true;

1041 }

1042 return true;

1043}

1047 bool FallthroughUnreachable,

1052 using namespace SwitchCG;

1055 if (I->Low == I->High) {

1056

1059 RHS = I->Low;

1060 MHS = nullptr;

1061 } else {

1062

1064 LHS = I->Low;

1066 RHS = I->High;

1067 }

1068

1069

1070

1071 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,

1072 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

1073

1074 emitSwitchCase(CB, SwitchMBB, MIB);

1075 return true;

1076}

1077

1081 MIB.setMBB(*SwitchBB);

1082

1083

1084 Register SwitchOpReg = getOrCreateVReg(*B.SValue);

1085

1086 LLT SwitchOpTy = MRI->getType(SwitchOpReg);

1088 auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

1089

1092

1093 LLT MaskTy = SwitchOpTy;

1095 !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))

1097 else {

1098

1099 for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {

1101

1102

1104 break;

1105 }

1106 }

1107 }

1109 if (SwitchOpTy != MaskTy)

1111

1114

1116

1117 if (!B.FallthroughUnreachable)

1118 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);

1119 addSuccessorWithProb(SwitchBB, MBB, B.Prob);

1120

1122

1123 if (!B.FallthroughUnreachable) {

1124

1125 auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);

1127 RangeSub, RangeCst);

1129 }

1130

1131

1134}

1135
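// emitBitTestCase emits one bit-test: a single-bit mask becomes an equality
// compare, a fully-populated range becomes an unsigned less-than, and the
// general case tests (1 << Value) against the case mask.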

1142 MIB.setMBB(*SwitchBB);

1143

1147 if (PopCount == 1) {

1148

1149

1150 auto MaskTrailingZeros =

1155 } else if (PopCount == BB.Range) {

1156

1157 auto MaskTrailingOnes =

1161 } else {

1162

1164 auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

1165

1166

1168 auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);

1172 }

1173

1174

1175 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);

1176

1177 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);

1178

1179

1180

1182

1183

1184

1185 addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},

1186 SwitchBB);

1187

1189

1190

1193}

1194

1195bool IRTranslator::lowerBitTestWorkItem(

1201 bool FallthroughUnreachable) {

1202 using namespace SwitchCG;

1204

1205 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];

1206

1207 for (BitTestCase &BTC : BTB->Cases)

1208 CurMF->insert(BBI, BTC.ThisBB);

1209

1210

1211 BTB->Parent = CurMBB;

1212 BTB->Default = Fallthrough;

1213

1214 BTB->DefaultProb = UnhandledProbs;

1215

1216

1217

1218 if (!BTB->ContiguousRange) {

1219 BTB->Prob += DefaultProb / 2;

1220 BTB->DefaultProb -= DefaultProb / 2;

1221 }

1222

1223 if (FallthroughUnreachable)

1224 BTB->FallthroughUnreachable = true;

1225

1226

1227 if (CurMBB == SwitchMBB) {

1228 emitBitTestHeader(*BTB, SwitchMBB);

1229 BTB->Emitted = true;

1230 }

1231 return true;

1232}

1233

1239 using namespace SwitchCG;

1243 if (++BBI != FuncInfo.MF->end())

1244 NextMBB = &*BBI;

1245

1246 if (EnableOpts) {

1247

1248

1249

1250

1251 llvm::sort(W.FirstCluster, W.LastCluster + 1,

1252 [](const CaseCluster &a, const CaseCluster &b) {

1253 return a.Prob != b.Prob

1254 ? a.Prob > b.Prob

1255 : a.Low->getValue().slt(b.Low->getValue());

1256 });

1257

1258

1259

1260 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {

1261 --I;

1262 if (I->Prob > W.LastCluster->Prob)

1263 break;

1264 if (I->Kind == CC_Range && I->MBB == NextMBB) {

1266 break;

1267 }

1268 }

1269 }

1270

1271

1274 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)

1275 UnhandledProbs += I->Prob;

1276

1278 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {

1279 bool FallthroughUnreachable = false;

1281 if (I == W.LastCluster) {

1282

1283 Fallthrough = DefaultMBB;

1284 FallthroughUnreachable = isa<UnreachableInst>(

1286 } else {

1288 CurMF->insert(BBI, Fallthrough);

1289 }

1290 UnhandledProbs -= I->Prob;

1291

1292 switch (I->Kind) {

1294 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,

1295 DefaultProb, UnhandledProbs, I, Fallthrough,

1296 FallthroughUnreachable)) {

1297 LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");

1298 return false;

1299 }

1300 break;

1301 }

1302

1304 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,

1305 UnhandledProbs, I, Fallthrough,

1306 FallthroughUnreachable)) {

1308 return false;

1309 }

1310 break;

1311 }

1313 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,

1314 FallthroughUnreachable, UnhandledProbs,

1315 CurMBB, MIB, SwitchMBB)) {

1316 LLVM_DEBUG(dbgs() << "Failed to lower switch range");

1317 return false;

1318 }

1319 break;

1320 }

1321 }

1322 CurMBB = Fallthrough;

1323 }

1324

1325 return true;

1326}

1327

1328bool IRTranslator::translateIndirectBr(const User &U,

1330 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

1331

1334

1335

1339

1340

1341

1342 if (!AddedSuccessors.insert(Succ).second)

1343 continue;

1345 }

1346

1347 return true;

1348}

1349

1351 if (auto Arg = dyn_cast<Argument>(V))

1352 return Arg->hasSwiftErrorAttr();

1353 if (auto AI = dyn_cast<AllocaInst>(V))

1355 return false;

1356}

1357
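// Loads and stores are translated one leaf value at a time: an aggregate
// access becomes one G_LOAD or G_STORE per split register, each carrying a
// machine memory operand at the matching byte offset, while swifterror
// accesses are rewritten into plain register copies.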

1358bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {

1359 const LoadInst &LI = cast<LoadInst>(U);

1361 if (StoreSize.isZero())

1362 return true;

1363

1368

1372

1374 assert(Regs.size() == 1 && "swifterror should be single pointer");

1377 MIRBuilder.buildCopy(Regs[0], VReg);

1378 return true;

1379 }

1380

1387 }

1388 }

1389

1391 Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;

1392 for (unsigned i = 0; i < Regs.size(); ++i) {

1395

1397 Align BaseAlign = getMemOpAlign(LI);

1400 commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,

1403 }

1404

1405 return true;

1406}

1407

1408bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {

1409 const StoreInst &SI = cast<StoreInst>(U);

1411 return true;

1412

1415 Register Base = getOrCreateVReg(*SI.getPointerOperand());

1416

1419

1421 assert(Vals.size() == 1 && "swifterror should be single pointer");

1422

1424 SI.getPointerOperand());

1425 MIRBuilder.buildCopy(VReg, Vals[0]);

1426 return true;

1427 }

1428

1430

1431 for (unsigned i = 0; i < Vals.size(); ++i) {

1434

1436 Align BaseAlign = getMemOpAlign(SI);

1439 commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,

1440 SI.getSyncScopeID(), SI.getOrdering());

1442 }

1443 return true;

1444}

1445

1447 const Value *Src = U.getOperand(0);

1449

1450

1451

1453 Indices.push_back(ConstantInt::get(Int32Ty, 0));

1454

1455 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {

1456 for (auto Idx : EVI->indices())

1457 Indices.push_back(ConstantInt::get(Int32Ty, Idx));

1458 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {

1459 for (auto Idx : IVI->indices())

1460 Indices.push_back(ConstantInt::get(Int32Ty, Idx));

1461 } else {

1464 }

1465

1466 return 8 * static_cast<uint64_t>(

1467 DL.getIndexedOffsetInType(Src->getType(), Indices));

1468}

1469

1470bool IRTranslator::translateExtractValue(const User &U,

1472 const Value *Src = U.getOperand(0);

1477 auto &DstRegs = allocateVRegs(U);

1478

1479 for (unsigned i = 0; i < DstRegs.size(); ++i)

1480 DstRegs[i] = SrcRegs[Idx++];

1481

1482 return true;

1483}

1484

1485bool IRTranslator::translateInsertValue(const User &U,

1487 const Value *Src = U.getOperand(0);

1489 auto &DstRegs = allocateVRegs(U);

1493 auto *InsertedIt = InsertedRegs.begin();

1494

1495 for (unsigned i = 0; i < DstRegs.size(); ++i) {

1496 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())

1497 DstRegs[i] = *InsertedIt++;

1498 else

1499 DstRegs[i] = SrcRegs[i];

1500 }

1501

1502 return true;

1503}

1504

1505bool IRTranslator::translateSelect(const User &U,

1507 Register Tst = getOrCreateVReg(*U.getOperand(0));

1511

1513 if (const SelectInst *SI = dyn_cast<SelectInst>(&U))

1515

1516 for (unsigned i = 0; i < ResRegs.size(); ++i) {

1517 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);

1518 }

1519

1520 return true;

1521}

1522

1523bool IRTranslator::translateCopy(const User &U, const Value &V,

1525 Register Src = getOrCreateVReg(V);

1526 auto &Regs = *VMap.getVRegs(U);

1527 if (Regs.empty()) {

1528 Regs.push_back(Src);

1529 VMap.getOffsets(U)->push_back(0);

1530 } else {

1531

1532

1533 MIRBuilder.buildCopy(Regs[0], Src);

1534 }

1535 return true;

1536}

1537

1538bool IRTranslator::translateBitCast(const User &U,

1540

1541 if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==

1543

1544

1545 if (isa<ConstantExpr>(U.getOperand(0)))

1546 return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,

1547 MIRBuilder);

1548 return translateCopy(U, *U.getOperand(0), MIRBuilder);

1549 }

1550

1551 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);

1552}

1553

1554bool IRTranslator::translateCast(unsigned Opcode, const User &U,

1556 if (U.getType()->getScalarType()->isBFloatTy() ||

1557 U.getOperand(0)->getType()->getScalarType()->isBFloatTy())

1558 return false;

1559

1561 if (const Instruction *I = dyn_cast<Instruction>(&U))

1563

1564 Register Op = getOrCreateVReg(*U.getOperand(0));

1565 Register Res = getOrCreateVReg(U);

1567 return true;

1568}

1569
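// GEP translation accumulates constant parts of the offset into a single
// immediate and emits G_MUL/G_PTR_ADD only for variable indices; when the
// GEP yields a vector of pointers, scalar bases and indices are splatted to
// vectors first.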

1570bool IRTranslator::translateGetElementPtr(const User &U,

1572 Value &Op0 = *U.getOperand(0);

1573 Register BaseReg = getOrCreateVReg(Op0);

1578

1580 if (const Instruction *I = dyn_cast<Instruction>(&U))

1582

1583

1584

1585 unsigned VectorWidth = 0;

1586

1587

1588

1589 bool WantSplatVector = false;

1590 if (auto *VT = dyn_cast<VectorType>(U.getType())) {

1591 VectorWidth = cast<FixedVectorType>(VT)->getNumElements();

1592

1593 WantSplatVector = VectorWidth > 1;

1594 }

1595

1596

1597

1598 if (WantSplatVector && !PtrTy.isVector()) {

1599 BaseReg = MIRBuilder

1601 BaseReg)

1607 }

1608

1611 GTI != E; ++GTI) {

1612 const Value *Idx = GTI.getOperand();

1613 if (StructType *StTy = GTI.getStructTypeOrNull()) {

1614 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();

1616 continue;

1617 } else {

1618 uint64_t ElementSize = GTI.getSequentialElementStride(*DL);

1619

1620

1621

1622 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {

1623 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {

1624 Offset += ElementSize * *Val;

1625 continue;

1626 }

1627 }

1628

1631 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))

1634 }

1635

1636 Register IdxReg = getOrCreateVReg(*Idx);

1638 if (IdxTy != OffsetTy) {

1639 if (!IdxTy.isVector() && WantSplatVector) {

1640 IdxReg = MIRBuilder

1642 IdxReg)

1644 }

1645

1647 }

1648

1649

1650

1652 if (ElementSize != 1) {

1655 GepOffsetReg =

1656 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);

1657 } else

1658 GepOffsetReg = IdxReg;

1659

1660 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);

1661 }

1662 }

1663

1665 auto OffsetMIB =

1667

1668 if (int64_t(Offset) >= 0 && cast<GEPOperator>(U).isInBounds())

1670

1671 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),

1672 Flags);

1673 return true;

1674 }

1675

1676 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);

1677 return true;

1678}

1679
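// Common lowering for the memcpy/memmove/memset family: every pointer and
// size operand becomes a vreg (the size is zext/truncated to the narrowest
// pointer width seen), and explicit MachineMemOperands carry the alignment,
// volatility, and alias information of the call.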

1680bool IRTranslator::translateMemFunc(const CallInst &CI,

1682 unsigned Opcode) {

1684

1685 if (isa<UndefValue>(SrcPtr))

1686 return true;

1687

1689

1690 unsigned MinPtrSize = UINT_MAX;

1691 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {

1692 Register SrcReg = getOrCreateVReg(**AI);

1695 MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);

1697 }

1698

1700

1701

1702 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];

1703 if (MRI->getType(SizeOpReg) != SizeTy)

1705

1706 auto ICall = MIRBuilder.buildInstr(Opcode);

1707 for (Register SrcReg : SrcRegs)

1708 ICall.addUse(SrcReg);

1709

1712 unsigned IsVol =

1714

1716

1717 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {

1718 DstAlign = MCI->getDestAlign().valueOrOne();

1719 SrcAlign = MCI->getSourceAlign().valueOrOne();

1720 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));

1721 } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {

1722 DstAlign = MCI->getDestAlign().valueOrOne();

1723 SrcAlign = MCI->getSourceAlign().valueOrOne();

1724 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));

1725 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {

1726 DstAlign = MMI->getDestAlign().valueOrOne();

1727 SrcAlign = MMI->getSourceAlign().valueOrOne();

1728 CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));

1729 } else {

1730 auto *MSI = cast<MemSetInst>(&CI);

1731 DstAlign = MSI->getDestAlign().valueOrOne();

1732 }

1733

1734 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {

1735

1736

1737

1738 ICall.addImm(CI.isTailCall() ? 1 : 0);

1739 }

1740

1741

1744 if (IsVol) {

1747 }

1748

1750 if (AA && CopySize &&

1754

1755

1756

1757

1759 }

1760

1761 ICall.addMemOperand(

1763 StoreFlags, 1, DstAlign, AAInfo));

1764 if (Opcode != TargetOpcode::G_MEMSET)

1767

1768 return true;

1769}

1770

1771bool IRTranslator::translateTrap(const CallInst &CI,

1773 unsigned Opcode) {

1776 if (TrapFuncName.empty()) {

1777 if (Opcode == TargetOpcode::G_UBSANTRAP) {

1780 } else {

1782 }

1783 return true;

1784 }

1785

1787 if (Opcode == TargetOpcode::G_UBSANTRAP)

1788 Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),

1790

1792 Info.CB = &CI;

1794 return CLI->lowerCall(MIRBuilder, Info);

1795}

1796

1797bool IRTranslator::translateVectorInterleave2Intrinsic(

1800 "This function can only be called on the interleave2 intrinsic!");

1801

1804 Register Res = getOrCreateVReg(CI);

1805

1809

1810 return true;

1811}

1812

1813bool IRTranslator::translateVectorDeinterleave2Intrinsic(

1816 "This function can only be called on the deinterleave2 intrinsic!");

1817

1818

1822

1828

1829 return true;

1830}

1831

1832void IRTranslator::getStackGuard(Register DstReg,

1835 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));

1836 auto MIB =

1837 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

1838

1841 return;

1842

1843 unsigned AddrSpace = Global->getType()->getPointerAddressSpace();

1845

1851 MIB.setMemRefs({MemRef});

1852}

1853

1854bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,

1858 Op, {ResRegs[0], ResRegs[1]},

1860

1861 return true;

1862}

1863

1864bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,

1866 Register Dst = getOrCreateVReg(CI);

1869 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();

1870 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });

1871 return true;

1872}

1873
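// A "simple" intrinsic maps 1:1 onto one generic opcode with no operand
// rewriting; IDs not listed here fall through and are handled by the larger
// switch in translateKnownIntrinsic or by the generic G_INTRINSIC path.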

1874unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {

1875 switch (ID) {

1876 default:

1877 break;

1878 case Intrinsic::acos:

1879 return TargetOpcode::G_FACOS;

1880 case Intrinsic::asin:

1881 return TargetOpcode::G_FASIN;

1882 case Intrinsic::atan:

1883 return TargetOpcode::G_FATAN;

1884 case Intrinsic::atan2:

1885 return TargetOpcode::G_FATAN2;

1886 case Intrinsic::bswap:

1887 return TargetOpcode::G_BSWAP;

1888 case Intrinsic::bitreverse:

1889 return TargetOpcode::G_BITREVERSE;

1890 case Intrinsic::fshl:

1891 return TargetOpcode::G_FSHL;

1892 case Intrinsic::fshr:

1893 return TargetOpcode::G_FSHR;

1894 case Intrinsic::ceil:

1895 return TargetOpcode::G_FCEIL;

1896 case Intrinsic::cos:

1897 return TargetOpcode::G_FCOS;

1898 case Intrinsic::cosh:

1899 return TargetOpcode::G_FCOSH;

1900 case Intrinsic::ctpop:

1901 return TargetOpcode::G_CTPOP;

1902 case Intrinsic::exp:

1903 return TargetOpcode::G_FEXP;

1904 case Intrinsic::exp2:

1905 return TargetOpcode::G_FEXP2;

1906 case Intrinsic::exp10:

1907 return TargetOpcode::G_FEXP10;

1908 case Intrinsic::fabs:

1909 return TargetOpcode::G_FABS;

1910 case Intrinsic::copysign:

1911 return TargetOpcode::G_FCOPYSIGN;

1912 case Intrinsic::minnum:

1913 return TargetOpcode::G_FMINNUM;

1914 case Intrinsic::maxnum:

1915 return TargetOpcode::G_FMAXNUM;

1916 case Intrinsic::minimum:

1917 return TargetOpcode::G_FMINIMUM;

1918 case Intrinsic::maximum:

1919 return TargetOpcode::G_FMAXIMUM;

1920 case Intrinsic::canonicalize:

1921 return TargetOpcode::G_FCANONICALIZE;

1922 case Intrinsic::floor:

1923 return TargetOpcode::G_FFLOOR;

1924 case Intrinsic::fma:

1925 return TargetOpcode::G_FMA;

1926 case Intrinsic::log:

1927 return TargetOpcode::G_FLOG;

1928 case Intrinsic::log2:

1929 return TargetOpcode::G_FLOG2;

1930 case Intrinsic::log10:

1931 return TargetOpcode::G_FLOG10;

1932 case Intrinsic::ldexp:

1933 return TargetOpcode::G_FLDEXP;

1934 case Intrinsic::nearbyint:

1935 return TargetOpcode::G_FNEARBYINT;

1936 case Intrinsic::pow:

1937 return TargetOpcode::G_FPOW;

1938 case Intrinsic::powi:

1939 return TargetOpcode::G_FPOWI;

1940 case Intrinsic::rint:

1941 return TargetOpcode::G_FRINT;

1942 case Intrinsic::round:

1943 return TargetOpcode::G_INTRINSIC_ROUND;

1944 case Intrinsic::roundeven:

1945 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;

1946 case Intrinsic::sin:

1947 return TargetOpcode::G_FSIN;

1948 case Intrinsic::sinh:

1949 return TargetOpcode::G_FSINH;

1950 case Intrinsic::sqrt:

1951 return TargetOpcode::G_FSQRT;

1952 case Intrinsic::tan:

1953 return TargetOpcode::G_FTAN;

1954 case Intrinsic::tanh:

1955 return TargetOpcode::G_FTANH;

1956 case Intrinsic::trunc:

1957 return TargetOpcode::G_INTRINSIC_TRUNC;

1958 case Intrinsic::readcyclecounter:

1959 return TargetOpcode::G_READCYCLECOUNTER;

1960 case Intrinsic::readsteadycounter:

1961 return TargetOpcode::G_READSTEADYCOUNTER;

1962 case Intrinsic::ptrmask:

1963 return TargetOpcode::G_PTRMASK;

1964 case Intrinsic::lrint:

1965 return TargetOpcode::G_INTRINSIC_LRINT;

1966 case Intrinsic::llrint:

1967 return TargetOpcode::G_INTRINSIC_LLRINT;

1968

1969 case Intrinsic::vector_reduce_fmin:

1970 return TargetOpcode::G_VECREDUCE_FMIN;

1971 case Intrinsic::vector_reduce_fmax:

1972 return TargetOpcode::G_VECREDUCE_FMAX;

1973 case Intrinsic::vector_reduce_fminimum:

1974 return TargetOpcode::G_VECREDUCE_FMINIMUM;

1975 case Intrinsic::vector_reduce_fmaximum:

1976 return TargetOpcode::G_VECREDUCE_FMAXIMUM;

1977 case Intrinsic::vector_reduce_add:

1978 return TargetOpcode::G_VECREDUCE_ADD;

1979 case Intrinsic::vector_reduce_mul:

1980 return TargetOpcode::G_VECREDUCE_MUL;

1981 case Intrinsic::vector_reduce_and:

1982 return TargetOpcode::G_VECREDUCE_AND;

1983 case Intrinsic::vector_reduce_or:

1984 return TargetOpcode::G_VECREDUCE_OR;

1985 case Intrinsic::vector_reduce_xor:

1986 return TargetOpcode::G_VECREDUCE_XOR;

1987 case Intrinsic::vector_reduce_smax:

1988 return TargetOpcode::G_VECREDUCE_SMAX;

1989 case Intrinsic::vector_reduce_smin:

1990 return TargetOpcode::G_VECREDUCE_SMIN;

1991 case Intrinsic::vector_reduce_umax:

1992 return TargetOpcode::G_VECREDUCE_UMAX;

1993 case Intrinsic::vector_reduce_umin:

1994 return TargetOpcode::G_VECREDUCE_UMIN;

1995 case Intrinsic::experimental_vector_compress:

1996 return TargetOpcode::G_VECTOR_COMPRESS;

1997 case Intrinsic::lround:

1998 return TargetOpcode::G_LROUND;

1999 case Intrinsic::llround:

2000 return TargetOpcode::G_LLROUND;

2001 case Intrinsic::get_fpenv:

2002 return TargetOpcode::G_GET_FPENV;

2003 case Intrinsic::get_fpmode:

2004 return TargetOpcode::G_GET_FPMODE;

2005 }

2007}

2008

2009bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,

2012

2013 unsigned Op = getSimpleIntrinsicOpcode(ID);

2014

2015

2017 return false;

2018

2019

2021 for (const auto &Arg : CI.args())

2022 VRegs.push_back(getOrCreateVReg(*Arg));

2023

2024 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,

2026 return true;

2027}

2028

2029

2031 switch (ID) {

2032 case Intrinsic::experimental_constrained_fadd:

2033 return TargetOpcode::G_STRICT_FADD;

2034 case Intrinsic::experimental_constrained_fsub:

2035 return TargetOpcode::G_STRICT_FSUB;

2036 case Intrinsic::experimental_constrained_fmul:

2037 return TargetOpcode::G_STRICT_FMUL;

2038 case Intrinsic::experimental_constrained_fdiv:

2039 return TargetOpcode::G_STRICT_FDIV;

2040 case Intrinsic::experimental_constrained_frem:

2041 return TargetOpcode::G_STRICT_FREM;

2042 case Intrinsic::experimental_constrained_fma:

2043 return TargetOpcode::G_STRICT_FMA;

2044 case Intrinsic::experimental_constrained_sqrt:

2045 return TargetOpcode::G_STRICT_FSQRT;

2046 case Intrinsic::experimental_constrained_ldexp:

2047 return TargetOpcode::G_STRICT_FLDEXP;

2048 default:

2049 return 0;

2050 }

2051}

2052

2053bool IRTranslator::translateConstrainedFPIntrinsic(

2056

2058 if (!Opcode)

2059 return false;

2060

2064

2068

2069 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);

2070 return true;

2071}

2072

2073std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {

2074 auto VRegs = getOrCreateVRegs(Arg);

2075 if (VRegs.size() != 1)

2076 return std::nullopt;

2077

2078

2080 if (!VRegDef || !VRegDef->isCopy())

2081 return std::nullopt;

2083}

2084

2085bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,

2090 auto *Arg = dyn_cast<Argument>(Val);

2091 if (!Arg)

2092 return false;

2093

2095 return false;

2096

2097 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);

2098 if (!PhysReg) {

2099 LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")

2100 << ": expression is entry_value but "

2101 << "couldn't find a physical register\n");

2103 return true;

2104 }

2105

2106 if (isDeclare) {

2107

2110 } else {

2112 }

2113

2114 return true;

2115}

2116

2118 switch (ID) {

2119 default:

2121 case Intrinsic::experimental_convergence_anchor:

2122 return TargetOpcode::CONVERGENCECTRL_ANCHOR;

2123 case Intrinsic::experimental_convergence_entry:

2124 return TargetOpcode::CONVERGENCECTRL_ENTRY;

2125 case Intrinsic::experimental_convergence_loop:

2126 return TargetOpcode::CONVERGENCECTRL_LOOP;

2127 }

2128}

2129

2130bool IRTranslator::translateConvergenceControlIntrinsic(

2133 Register OutputReg = getOrCreateConvergenceTokenVReg(CI);

2134 MIB.addDef(OutputReg);

2135

2136 if (ID == Intrinsic::experimental_convergence_loop) {

2138 assert(Bundle && "Expected a convergence control token.");

2140 getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());

2141 MIB.addUse(InputReg);

2142 }

2143

2144 return true;

2145}

2146
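// translateKnownIntrinsic covers intrinsics needing bespoke lowering: debug
// intrinsics, lifetime markers, overflow and saturating arithmetic, stack
// protector plumbing, traps, vector reductions, and the constrained-FP
// family. Returning false defers to the generic intrinsic call path.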

2149 if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {

2150 if (ORE->enabled()) {

2152 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);

2153 R.visit(MI);

2154 }

2155 }

2156 }

2157

2158

2159

2160 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))

2161 return true;

2162

2163 switch (ID) {

2164 default:

2165 break;

2166 case Intrinsic::lifetime_start:

2167 case Intrinsic::lifetime_end: {

2168

2171 return true;

2172

2173 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START

2174 : TargetOpcode::LIFETIME_END;

2175

2176

2177

2180

2181

2182

2183 for (const Value *V : Allocas) {

2184 const AllocaInst *AI = dyn_cast<AllocaInst>(V);

2185 if (!AI)

2186 continue;

2187

2189 return true;

2190

2192 }

2193 return true;

2194 }

2195 case Intrinsic::fake_use: {

2197 for (const auto &Arg : CI.args())

2198 for (auto VReg : getOrCreateVRegs(*Arg))

2200 MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);

2202 return true;

2203 }

2204 case Intrinsic::dbg_declare: {

2205 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);

2209 return true;

2210 }

2211 case Intrinsic::dbg_label: {

2212 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);

2214

2217 "Expected inlined-at fields to agree");

2218

2220 return true;

2221 }

2222 case Intrinsic::vaend:

2223

2224

2225 return true;

2226 case Intrinsic::vastart: {

2230

2231 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})

2234 ListSize, Alignment));

2235 return true;

2236 }

2237 case Intrinsic::dbg_assign:

2238

2239

2240

2241

2242

2243 [[fallthrough]];

2244 case Intrinsic::dbg_value: {

2245

2246 const DbgValueInst &DI = cast<DbgValueInst>(CI);

2249 return true;

2250 }

2251 case Intrinsic::uadd_with_overflow:

2252 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);

2253 case Intrinsic::sadd_with_overflow:

2254 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);

2255 case Intrinsic::usub_with_overflow:

2256 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);

2257 case Intrinsic::ssub_with_overflow:

2258 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);

2259 case Intrinsic::umul_with_overflow:

2260 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);

2261 case Intrinsic::smul_with_overflow:

2262 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);

2263 case Intrinsic::uadd_sat:

2264 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);

2265 case Intrinsic::sadd_sat:

2266 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);

2267 case Intrinsic::usub_sat:

2268 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);

2269 case Intrinsic::ssub_sat:

2270 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);

2271 case Intrinsic::ushl_sat:

2272 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);

2273 case Intrinsic::sshl_sat:

2274 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);

2275 case Intrinsic::umin:

2276 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);

2277 case Intrinsic::umax:

2278 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);

2279 case Intrinsic::smin:

2280 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);

2281 case Intrinsic::smax:

2282 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);

2283 case Intrinsic::abs:

2284

2285 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);

2286 case Intrinsic::smul_fix:

2287 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);

2288 case Intrinsic::umul_fix:

2289 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);

2290 case Intrinsic::smul_fix_sat:

2291 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);

2292 case Intrinsic::umul_fix_sat:

2293 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);

2294 case Intrinsic::sdiv_fix:

2295 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);

2296 case Intrinsic::udiv_fix:

2297 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);

2298 case Intrinsic::sdiv_fix_sat:

2299 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);

2300 case Intrinsic::udiv_fix_sat:

2301 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);

2302 case Intrinsic::fmuladd: {

2304 Register Dst = getOrCreateVReg(CI);

2311

2312

2313 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,

2315 } else {

2321 }

2322 return true;

2323 }

2324 case Intrinsic::convert_from_fp16:

2325

2326 MIRBuilder.buildFPExt(getOrCreateVReg(CI),

2329 return true;

2330 case Intrinsic::convert_to_fp16:

2331

2332 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),

2335 return true;

2336 case Intrinsic::frexp: {

2338 MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],

2341 return true;

2342 }

2343 case Intrinsic::sincos: {

2348 return true;

2349 }

2350 case Intrinsic::fptosi_sat:

2353 return true;

2354 case Intrinsic::fptoui_sat:

2357 return true;

2358 case Intrinsic::memcpy_inline:

2359 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);

2360 case Intrinsic::memcpy:

2361 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);

2362 case Intrinsic::memmove:

2363 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);

2364 case Intrinsic::memset:

2365 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);

2366 case Intrinsic::eh_typeid_for: {

2371 return true;

2372 }

2373 case Intrinsic::objectsize:

2374 llvm_unreachable("llvm.objectsize.* should have been lowered already");

2375

2376 case Intrinsic::is_constant:

2377 llvm_unreachable("llvm.is.constant.* should have been lowered already");

2378

2379 case Intrinsic::stackguard:

2380 getStackGuard(getOrCreateVReg(CI), MIRBuilder);

2381 return true;

2382 case Intrinsic::stackprotector: {

2387 getStackGuard(GuardVal, MIRBuilder);

2388 } else

2389 GuardVal = getOrCreateVReg(*CI.getArgOperand(0));

2390

2392 int FI = getOrCreateFrameIndex(*Slot);

2394

2396 GuardVal, getOrCreateVReg(*Slot),

2400 PtrTy, Align(8)));

2401 return true;

2402 }

2403 case Intrinsic::stacksave: {

2404 MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});

2405 return true;

2406 }

2407 case Intrinsic::stackrestore: {

2408 MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},

2410 return true;

2411 }

2412 case Intrinsic::cttz:

2413 case Intrinsic::ctlz: {

2415 bool isTrailing = ID == Intrinsic::cttz;

2416 unsigned Opcode = isTrailing

2417 ? Cst->isZero() ? TargetOpcode::G_CTTZ

2418 : TargetOpcode::G_CTTZ_ZERO_UNDEF

2419 : Cst->isZero() ? TargetOpcode::G_CTLZ

2420 : TargetOpcode::G_CTLZ_ZERO_UNDEF;

2421 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},

2423 return true;

2424 }

2425 case Intrinsic::invariant_start: {

2429 return true;

2430 }

2431 case Intrinsic::invariant_end:

2432 return true;

2433 case Intrinsic::expect:

2434 case Intrinsic::expect_with_probability:

2435 case Intrinsic::annotation:

2436 case Intrinsic::ptr_annotation:

2437 case Intrinsic::launder_invariant_group:

2438 case Intrinsic::strip_invariant_group: {

2439

2440 MIRBuilder.buildCopy(getOrCreateVReg(CI),

2442 return true;

2443 }

2444 case Intrinsic::assume:

2445 case Intrinsic::experimental_noalias_scope_decl:

2446 case Intrinsic::var_annotation:

2447 case Intrinsic::sideeffect:

2448

2449 return true;

2450 case Intrinsic::read_volatile_register:

2451 case Intrinsic::read_register: {

2453 MIRBuilder

2454 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})

2455 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));

2456 return true;

2457 }

2458 case Intrinsic::write_register: {

2460 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)

2461 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))

2463 return true;

2464 }

2465 case Intrinsic::localescape: {

2468

2469

2470

2473 if (isa<ConstantPointerNull>(Arg))

2474 continue;

2475

2476 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));

2479

2480

2481 auto LocalEscape =

2483 .addSym(FrameAllocSym)

2485

2486 EntryMBB.insert(EntryMBB.begin(), LocalEscape);

2487 }

2488

2489 return true;

2490 }

2491 case Intrinsic::vector_reduce_fadd:

2492 case Intrinsic::vector_reduce_fmul: {

2493

2494

2495 Register Dst = getOrCreateVReg(CI);

2498 unsigned Opc = 0;

2500

2501 Opc = ID == Intrinsic::vector_reduce_fadd

2502 ? TargetOpcode::G_VECREDUCE_SEQ_FADD

2503 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;

2504 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},

2506 return true;

2507 }

2508

2509

2510 unsigned ScalarOpc;

2511 if (ID == Intrinsic::vector_reduce_fadd) {

2512 Opc = TargetOpcode::G_VECREDUCE_FADD;

2513 ScalarOpc = TargetOpcode::G_FADD;

2514 } else {

2515 Opc = TargetOpcode::G_VECREDUCE_FMUL;

2516 ScalarOpc = TargetOpcode::G_FMUL;

2517 }

2521 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},

2523

2524 return true;

2525 }

2526 case Intrinsic::trap:

2527 return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);

2528 case Intrinsic::debugtrap:

2529 return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);

2530 case Intrinsic::ubsantrap:

2531 return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);

2532 case Intrinsic::allow_runtime_check:

2533 case Intrinsic::allow_ubsan_check:

2534 MIRBuilder.buildCopy(getOrCreateVReg(CI),

2536 return true;

2537 case Intrinsic::amdgcn_cs_chain:

2538 return translateCallBase(CI, MIRBuilder);

2539 case Intrinsic::fptrunc_round: {

2541

2542

2544 std::optional<RoundingMode> RoundMode =

2546

2547

2548 MIRBuilder

2549 .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,

2550 {getOrCreateVReg(CI)},

2552 .addImm((int)*RoundMode);

2553

2554 return true;

2555 }

2556 case Intrinsic::is_fpclass: {

2559

2560 MIRBuilder

2561 .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},

2562 {getOrCreateVReg(*FpValue)})

2564

2565 return true;

2566 }

2567 case Intrinsic::set_fpenv: {

2569 MIRBuilder.buildSetFPEnv(getOrCreateVReg(*FPEnv));

2570 return true;

2571 }

2572 case Intrinsic::reset_fpenv:

2574 return true;

2575 case Intrinsic::set_fpmode: {

2577 MIRBuilder.buildSetFPMode(getOrCreateVReg(*FPState));

2578 return true;

2579 }

2580 case Intrinsic::reset_fpmode:

2582 return true;

2583 case Intrinsic::vscale: {

2584 MIRBuilder.buildVScale(getOrCreateVReg(CI), 1);

2585 return true;

2586 }

2587 case Intrinsic::scmp:

2588 MIRBuilder.buildSCmp(getOrCreateVReg(CI),

2590 getOrCreateVReg(*CI.getOperand(1)));

2591 return true;

2592 case Intrinsic::ucmp:

2593 MIRBuilder.buildUCmp(getOrCreateVReg(CI),

2595 getOrCreateVReg(*CI.getOperand(1)));

2596 return true;

2597 case Intrinsic::vector_extract:

2598 return translateExtractVector(CI, MIRBuilder);

2599 case Intrinsic::vector_insert:

2600 return translateInsertVector(CI, MIRBuilder);

2601 case Intrinsic::stepvector: {

2603 return true;

2604 }

2605 case Intrinsic::prefetch: {

2607 unsigned RW = cast(CI.getOperand(1))->getZExtValue();

2608 unsigned Locality = cast(CI.getOperand(2))->getZExtValue();

2609 unsigned CacheType = cast(CI.getOperand(3))->getZExtValue();

2610

2614

2615 MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,

2616 MMO);

2617

2618 return true;

2619 }

2620

2621 case Intrinsic::vector_interleave2:

2622 case Intrinsic::vector_deinterleave2: {

2623

2627 return false;

2628

2629 if (CI.getIntrinsicID() == Intrinsic::vector_interleave2)

2630 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);

2631

2632 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);

2633 }

2634

2635#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \

2636 case Intrinsic::INTRINSIC:

2637#include "llvm/IR/ConstrainedOps.def"

2638 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),

2639 MIRBuilder);

2640 case Intrinsic::experimental_convergence_anchor:

2641 case Intrinsic::experimental_convergence_entry:

2642 case Intrinsic::experimental_convergence_loop:

2643 return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);

2644 }

2645 return false;

2646}

2647

2648bool IRTranslator::translateInlineAsm(const CallBase &CB,

2650

2652

2653 if (!ALI) {

2655 dbgs() << "Inline asm lowering is not supported for this target yet\n");

2656 return false;

2657 }

2658

2660 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });

2661}

2662
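// translateCallBase funnels calls through CallLowering::lowerCall, first
// collecting per-argument vreg lists (with swifterror arguments remapped),
// an optional pointer-authentication bundle, and an optional
// convergence-control token.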

2663bool IRTranslator::translateCallBase(const CallBase &CB,

2666

2669 Register SwiftErrorVReg = 0;

2670 for (const auto &Arg : CB.args()) {

2672 assert(SwiftInVReg == 0 && "Expected only one swift error argument");

2676 &CB, &MIRBuilder.getMBB(), Arg));

2678 SwiftErrorVReg =

2680 continue;

2681 }

2682 Args.push_back(getOrCreateVRegs(*Arg));

2683 }

2684

2685 if (auto *CI = dyn_cast<CallInst>(&CB)) {

2686 if (ORE->enabled()) {

2688 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);

2689 R.visit(CI);

2690 }

2691 }

2692 }

2693

2694 std::optional<CallLowering::PtrAuthInfo> PAI;

2696

2698

2699 const Value *Key = Bundle->Inputs[0];

2701

2702

2703

2704

2705 const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CB.getCalledOperand());

2706 if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||

2707 !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {

2708

2709 Register DiscReg = getOrCreateVReg(*Discriminator);

2711 DiscReg};

2712 }

2713 }

2714

2715 Register ConvergenceCtrlToken = 0;

2717 const auto &Token = *Bundle->Inputs[0].get();

2718 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);

2719 }

2720

2721

2722

2723

2725 MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,

2727

2728

2730 assert(!HasTailCall && "Can't tail call return twice from block?");

2733 }

2734

2736}

2737

2738bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {

2739 const CallInst &CI = cast<CallInst>(U);

2742

2743

2744

2745 if (F && (F->hasDLLImportStorageClass() ||

2747 F->hasExternalWeakLinkage())))

2748 return false;

2749

2750

2752 return false;

2753

2754

2755 if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))

2756 return false;

2757

2759 return translateInlineAsm(CI, MIRBuilder);

2760

2762

2764 if (F && F->isIntrinsic()) {

2765 ID = F->getIntrinsicID();

2768 }

2769

2771 return translateCallBase(CI, MIRBuilder);

2772

2774

2775 if (translateKnownIntrinsic(CI, ID, MIRBuilder))

2776 return true;

2777

2780 ResultRegs = getOrCreateVRegs(CI);

2781

2782

2783

2785 if (isa<FPMathOperator>(CI))

2787

2789

2790

2791 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {

2792 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {

2793

2794

2795 assert(CI->getBitWidth() <= 64 &&

2796 "large intrinsic immediates not handled");

2797 MIB.addImm(CI->getSExtValue());

2798 } else {

2799 MIB.addFPImm(cast<ConstantFP>(Arg.value()));

2800 }

2801 } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {

2802 auto *MD = MDVal->getMetadata();

2803 auto *MDN = dyn_cast<MDNode>(MD);

2804 if (!MDN) {

2805 if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))

2807 else

2808 return false;

2809 }

2811 } else {

2813 if (VRegs.size() > 1)

2814 return false;

2815 MIB.addUse(VRegs[0]);

2816 }

2817 }

2818

2819

2821

2823 Align Alignment = Info.align.value_or(

2824 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));

2825 LLT MemTy = Info.memVT.isSimple()

2827 : LLT::scalar(Info.memVT.getStoreSizeInBits());

2828

2829

2830

2832 if (Info.ptrVal)

2834 else if (Info.fallbackAddressSpace)

2838 }

2839

2842 auto *Token = Bundle->Inputs[0].get();

2843 Register TokenReg = getOrCreateVReg(*Token);

2845 }

2846 }

2847

2848 return true;

2849}

2850

2851bool IRTranslator::findUnwindDestinations(

2854 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>

2855 &UnwindDests) {

2862

2863 if (IsWasmCXX) {

2864

2865 return false;

2866 }

2867

2868 while (EHPadBB) {

2871 if (isa<LandingPadInst>(Pad)) {

2872

2873 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);

2874 break;

2875 }

2876 if (isa<CleanupPadInst>(Pad)) {

2877

2878

2879 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);

2880 UnwindDests.back().first->setIsEHScopeEntry();

2881 UnwindDests.back().first->setIsEHFuncletEntry();

2882 break;

2883 }

2884 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {

2885

2886 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {

2887 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);

2888

2889 if (IsMSVCCXX || IsCoreCLR)

2890 UnwindDests.back().first->setIsEHFuncletEntry();

2891 if (!IsSEH)

2892 UnwindDests.back().first->setIsEHScopeEntry();

2893 }

2894 NewEHPadBB = CatchSwitch->getUnwindDest();

2895 } else {

2896 continue;

2897 }

2898

2900 if (BPI && NewEHPadBB)

2902 EHPadBB = NewEHPadBB;

2903 }

2904 return true;

2905}

2906
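// Invokes lower like calls bracketed by EH_LABELs; afterwards the invoking
// block gains the normal-return successor plus every unwind destination
// collected by findUnwindDestinations.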

2907bool IRTranslator::translateInvoke(const User &U,

2909 const InvokeInst &I = cast<InvokeInst>(U);

2911

2912 const BasicBlock *ReturnBB = I.getSuccessor(0);

2913 const BasicBlock *EHPadBB = I.getSuccessor(1);

2914

2915 const Function *Fn = I.getCalledFunction();

2916

2917

2919 return false;

2920

2921

2922 if (I.hasDeoptState())

2923 return false;

2924

2925

2927 return false;

2928

2929

2930 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))

2931 return false;

2932

2933

2934

2938 return false;

2939

2940 bool LowerInlineAsm = I.isInlineAsm();

2941 bool NeedEHLabel = true;

2942

2943

2944

2945 MCSymbol *BeginSymbol = nullptr;

2946 if (NeedEHLabel) {

2947 MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);

2949 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

2950 }

2951

2952 if (LowerInlineAsm) {

2953 if (!translateInlineAsm(I, MIRBuilder))

2954 return false;

2955 } else if (!translateCallBase(I, MIRBuilder))

2956 return false;

2957

2958 MCSymbol *EndSymbol = nullptr;

2959 if (NeedEHLabel) {

2961 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

2962 }

2963

2970

2971 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))

2972 return false;

2973

2975 &ReturnMBB = getMBB(*ReturnBB);

2976

2977 addSuccessorWithProb(InvokeMBB, &ReturnMBB);

2978 for (auto &UnwindDest : UnwindDests) {

2979 UnwindDest.first->setIsEHPad();

2980 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);

2981 }

2983

2984 if (NeedEHLabel) {

2985 assert(BeginSymbol && "Expected a begin symbol!");

2986 assert(EndSymbol && "Expected an end symbol!");

2987 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);

2988 }

2989

2990 MIRBuilder.buildBr(ReturnMBB);

2991 return true;

2992}

2993

2994bool IRTranslator::translateCallBr(const User &U,

2996

2997 return false;

2998}

2999

3000bool IRTranslator::translateLandingPad(const User &U,

3003

3005

3007

3008

3009

3013 return true;

3014

3015

3016

3017

3018

3020 return true;

3021

3022

3023

3024 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)

3026

3027

3028

3030 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))

3032

3036

3038 for (Type *Ty : cast<StructType>(LP.getType())->elements())

3040 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

3041

3042

3044 if (!ExceptionReg)

3045 return false;

3046

3049 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

3050

3052 if (!SelectorReg)

3053 return false;

3054

3057 MIRBuilder.buildCopy(PtrVReg, SelectorReg);

3058 MIRBuilder.buildCast(ResRegs[1], PtrVReg);

3059

3060 return true;

3061}

3062

3063 bool IRTranslator::translateAlloca(const User &U,
3064                                    MachineIRBuilder &MIRBuilder) {
3065   auto &AI = cast<AllocaInst>(U);
3066
3067   if (AI.isSwiftError())
3068     return true;
3069
3070   if (AI.isStaticAlloca()) {
3071     Register Res = getOrCreateVReg(AI);
3072     int FI = getOrCreateFrameIndex(AI);
3073     MIRBuilder.buildFrameIndex(Res, FI);
3074     return true;
3075   }
3076
3077   // FIXME: support stack probing for Windows (see visitAlloca).
3078   if (MF->getTarget().getTargetTriple().isOSWindows())
3079     return false;
3080
3081   // Now we're in the harder dynamic case.
3082   Register NumElts = getOrCreateVReg(*AI.getArraySize());
3083   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
3084   const LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
3085   if (MRI->getType(NumElts) != IntPtrTy) {
3086     Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
3087     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
3088     NumElts = ExtElts;
3089   }
3090
3091   Type *Ty = AI.getAllocatedType();
3092
3093   Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
3094   Register TySize =
3095       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
3096   MIRBuilder.buildMul(AllocSize, NumElts, TySize);
3097
3098   // Round the size of the allocation up to the stack alignment size
3099   // by adding SA-1 to the size. This doesn't overflow because we're computing
3100   // an address inside an alloca.
3101   Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3102   auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
3103   auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3104                                       MachineInstr::NoUWrap);
3105   auto AlignCst =
3106       MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
3107   auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
3108
3109   Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
3110   if (Alignment <= StackAlign)
3111     Alignment = Align(1);
3112   MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
3113
3114   MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
3115   assert(MF->getFrameInfo().hasVarSizedObjects());
3116   return true;
3117 }
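// Worked example (editorial addition): for a dynamic alloca such as
//   %buf = alloca i32, i32 %n, align 4
// with 64-bit pointers and a 16-byte stack alignment, the code above computes
//   AllocSize   = zext(%n to i64) * 4
//   AlignedSize = (AllocSize + 15) & ~15
// and emits G_DYN_STACKALLOC of AlignedSize. The add/and trick rounds up
// correctly because the stack alignment is always a power of two.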

3118

3119 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
3120   // FIXME: We may need more info about the type. Because of how LLT works,
3121   // we're completely discarding the i64/double distinction here (amongst
3122   // others). Fortunately the ABIs I know of where that matters don't use
3123   // va_arg anyway but that's not guaranteed.
3124   MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3125                         {getOrCreateVReg(*U.getOperand(0)),
3126                          DL->getABITypeAlign(U.getType()).value()});
3127   return true;
3128 }

3129

3130 bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
3131   if (!MF->getTarget().Options.TrapUnreachable)
3132     return true;
3133
3134   auto &UI = cast<UnreachableInst>(U);
3135
3136   // We may be able to ignore unreachable behind a noreturn call.
3137   if (const CallInst *Call = dyn_cast_or_null<CallInst>(UI.getPrevNode());
3138       Call && Call->doesNotReturn()) {
3139     if (MF->getTarget().Options.NoTrapAfterNoreturn)
3140       return true;
3141     // Do not emit an additional trap instruction.
3142     if (Call->isNonContinuableTrap())
3143       return true;
3144   }
3145
3146   MIRBuilder.buildTrap();
3147   return true;
3148 }

3149

3150 bool IRTranslator::translateInsertElement(const User &U,
3151                                           MachineIRBuilder &MIRBuilder) {
3152   // If it is a <1 x Ty> vector, use the scalar as it is
3153   // not a legal vector type in LLT.
3154   if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
3155       FVT && FVT->getNumElements() == 1)
3156     return translateCopy(U, *U.getOperand(1), MIRBuilder);
3157
3158   Register Res = getOrCreateVReg(U);
3159   Register Val = getOrCreateVReg(*U.getOperand(0));
3160   Register Elt = getOrCreateVReg(*U.getOperand(1));
3161   unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
3162   Register Idx;
3163   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(2))) {
3164     if (CI->getBitWidth() != PreferredVecIdxWidth) {
3165       APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3166       auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3167       Idx = getOrCreateVReg(*NewIdxCI);
3168     }
3169   }
3170   if (!Idx)
3171     Idx = getOrCreateVReg(*U.getOperand(2));
3172   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3173     const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3174     Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3175   }
3176   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
3177   return true;
3178 }
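// Illustrative example (editorial addition): if the target's preferred
// vector-index width is 64 bits, then for
//   %v = insertelement <4 x i32> %a, i32 %e, i16 3
// the i16 index constant is rebuilt as an i64 constant before emitting
//   %v:_(<4 x s32>) = G_INSERT_VECTOR_ELT %a, %e(s32), %idx(s64)
// whereas a non-constant index is widened or truncated in MIR instead.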

3179

3180 bool IRTranslator::translateInsertVector(const User &U,
3181                                          MachineIRBuilder &MIRBuilder) {
3182   Register Dst = getOrCreateVReg(U);
3183   Register Vec = getOrCreateVReg(*U.getOperand(0));
3184   Register Elt = getOrCreateVReg(*U.getOperand(1));
3185
3186   ConstantInt *CI = cast<ConstantInt>(U.getOperand(2));
3187   unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
3188
3189   // Resize Index to preferred index width.
3190   if (CI->getBitWidth() != PreferredVecIdxWidth) {
3191     APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3192     CI = ConstantInt::get(CI->getContext(), NewIdx);
3193   }
3194
3195   // If it is a <1 x Ty> vector, we have to use other means.
3196   if (auto *ResultType = dyn_cast<FixedVectorType>(U.getOperand(1)->getType());
3197       ResultType && ResultType->getNumElements() == 1) {
3198     if (auto *InputType = dyn_cast<FixedVectorType>(U.getOperand(0)->getType());
3199         InputType && InputType->getNumElements() == 1) {
3200       // We are inserting an illegal fixed vector into an illegal fixed
3201       // vector: use the scalar, as <1 x Ty> is not a legal vector type
3202       // in LLT.
3203       return translateCopy(U, *U.getOperand(0), MIRBuilder);
3204     }
3205     if (isa<FixedVectorType>(U.getOperand(0)->getType())) {
3206       // We are inserting an illegal fixed vector into a legal fixed
3207       // vector: a scalar element insert keeps the operation legal for
3208       // LLT.
3209       Register Idx = getOrCreateVReg(*CI);
3210       MIRBuilder.buildInsertVectorElement(Dst, Vec, Elt, Idx);
3211       return true;
3212     }
3213     if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
3214       // We are inserting an illegal fixed vector into a scalable
3215       // vector: use a scalar element insert with a vscale-scaled index.
3216       const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3217       Register Idx = getOrCreateVReg(*CI);
3218       auto ScaledIndex = MIRBuilder.buildMul(
3219           VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
3220       MIRBuilder.buildInsertVectorElement(Dst, Vec, Elt, ScaledIndex);
3221       return true;
3222     }
3223   }
3224
3225   MIRBuilder.buildInsertSubvector(
3226       getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
3227       getOrCreateVReg(*U.getOperand(1)), CI->getZExtValue());
3228   return true;
3229 }

3230

3231 bool IRTranslator::translateExtractElement(const User &U,
3232                                            MachineIRBuilder &MIRBuilder) {
3233   // If it is a <1 x Ty> vector, use the scalar as it is
3234   // not a legal vector type in LLT.
3235   if (const FixedVectorType *FVT =
3236           dyn_cast<FixedVectorType>(U.getOperand(0)->getType()))
3237     if (FVT->getNumElements() == 1)
3238       return translateCopy(U, *U.getOperand(0), MIRBuilder);
3239
3240   Register Res = getOrCreateVReg(U);
3241   Register Val = getOrCreateVReg(*U.getOperand(0));
3242   unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
3243   Register Idx;
3244   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
3245     if (CI->getBitWidth() != PreferredVecIdxWidth) {
3246       APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3247       auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3248       Idx = getOrCreateVReg(*NewIdxCI);
3249     }
3250   }
3251   if (!Idx)
3252     Idx = getOrCreateVReg(*U.getOperand(1));
3253   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3254     const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3255     Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3256   }
3257   MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
3258   return true;
3259 }

3260

3261 bool IRTranslator::translateExtractVector(const User &U,
3262                                           MachineIRBuilder &MIRBuilder) {
3263   Register Res = getOrCreateVReg(U);
3264   Register Vec = getOrCreateVReg(*U.getOperand(0));
3265   ConstantInt *CI = cast<ConstantInt>(U.getOperand(1));
3266   unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
3267
3268   // Resize Index to preferred index width.
3269   if (CI->getBitWidth() != PreferredVecIdxWidth) {
3270     APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3271     CI = ConstantInt::get(CI->getContext(), NewIdx);
3272   }
3273
3274   // If it is a <1 x Ty> vector, we have to use other means.
3275   if (auto *ResultType = dyn_cast<FixedVectorType>(U.getType());
3276       ResultType && ResultType->getNumElements() == 1) {
3277     if (auto *InputType = dyn_cast<FixedVectorType>(U.getOperand(0)->getType());
3278         InputType && InputType->getNumElements() == 1) {
3279       // We are extracting an illegal fixed vector from an illegal fixed
3280       // vector: use the scalar, as <1 x Ty> is not a legal LLT vector type.
3281       return translateCopy(U, *U.getOperand(0), MIRBuilder);
3282     }
3283     if (isa<FixedVectorType>(U.getOperand(0)->getType())) {
3284       // We are extracting an illegal fixed vector from a legal fixed
3285       // vector: a scalar element extract keeps the operation legal
3286       // for LLT.
3287       Register Idx = getOrCreateVReg(*CI);
3288       MIRBuilder.buildExtractVectorElement(Res, Vec, Idx);
3289       return true;
3290     }
3291     if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
3292       // We are extracting an illegal fixed vector from a scalable
3293       // vector: use a scalar element extract with a vscale-scaled index.
3294       const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3295       Register Idx = getOrCreateVReg(*CI);
3296       auto ScaledIndex = MIRBuilder.buildMul(
3297           VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
3298       MIRBuilder.buildExtractVectorElement(Res, Vec, ScaledIndex);
3299       return true;
3300     }
3301   }
3302
3303   MIRBuilder.buildExtractSubvector(getOrCreateVReg(U),
3304       getOrCreateVReg(*U.getOperand(0)),
3305       CI->getZExtValue());
3306   return true;
3307 }
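// Illustrative example (editorial addition): extracting a fixed <1 x i32>
// from a scalable <vscale x 4 x i32> at constant index %idx becomes a scalar
// element extract whose index is scaled by the runtime vscale:
//   %vs     = G_VSCALE 1
//   %scaled = G_MUL %vs, %idx
//   %res    = G_EXTRACT_VECTOR_ELT %vec, %scaled
// mirroring the LangRef rule that subvector indices on scalable vectors are
// multiplied by vscale.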

3308

3309 bool IRTranslator::translateShuffleVector(const User &U,
3310                                           MachineIRBuilder &MIRBuilder) {
3311   // A ShuffleVector that operates on scalable vectors is a splat vector
3312   // where the value of the splat vector is the 0th element of the first
3313   // operand, since the index from the mask must be zero for a scalable
3314   // vector shuffle.
3315   if (U.getOperand(0)->getType()->isScalableTy()) {
3316     Register Val = getOrCreateVReg(*U.getOperand(0));
3317     auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
3318         MRI->getType(Val).getElementType(), Val, 0);
3319     MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal);
3320     return true;
3321   }
3322
3323   ArrayRef<int> Mask;
3324   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
3325     Mask = SVI->getShuffleMask();
3326   else
3327     Mask = cast<ConstantExpr>(U).getShuffleMask();
3328   ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3329   MIRBuilder
3330       .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3331                   {getOrCreateVReg(*U.getOperand(0)),
3332                    getOrCreateVReg(*U.getOperand(1))})
3333       .addShuffleMask(MaskAlloc);
3334   return true;
3335 }
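// Illustrative example (editorial addition): a fixed-width shuffle
//   %r = shufflevector <2 x i32> %a, <2 x i32> %b,
//                      <4 x i32> <i32 0, i32 2, i32 1, i32 3>
// becomes a single generic instruction whose mask is stored in
// MachineFunction-allocated memory:
//   %r:_(<4 x s32>) = G_SHUFFLE_VECTOR %a, %b, shufflemask(0, 2, 1, 3)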

3336

3337 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
3338   const PHINode &PI = cast<PHINode>(U);
3339
3340   SmallVector<MachineInstr *, 4> Insts;
3341   for (auto Reg : getOrCreateVRegs(PI)) {
3342     auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
3343     Insts.push_back(MIB.getInstr());
3344   }
3345
3346   PendingPHIs.emplace_back(&PI, std::move(Insts));
3347   return true;
3348 }
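// Editorial note: only the G_PHI defs are created at this point. The
// (value, predecessor-MBB) operand pairs cannot be attached yet because some
// predecessor blocks may not have been translated; finishPendingPhis() adds
// them once every machine CFG edge is known.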

3349

3350 bool IRTranslator::translateAtomicCmpXchg(const User &U,
3351                                           MachineIRBuilder &MIRBuilder) {
3352   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
3353
3354   auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3355
3356   auto Res = getOrCreateVRegs(I);
3357   Register OldValRes = Res[0];
3358   Register SuccessRes = Res[1];
3359   Register Addr = getOrCreateVReg(*I.getPointerOperand());
3360   Register Cmp = getOrCreateVReg(*I.getCompareOperand());
3361   Register NewVal = getOrCreateVReg(*I.getNewValOperand());
3362
3363   MIRBuilder.buildAtomicCmpXchgWithSuccess(
3364       OldValRes, SuccessRes, Addr, Cmp, NewVal,
3365       *MF->getMachineMemOperand(
3366           MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
3367           getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3368           I.getSuccessOrdering(), I.getFailureOrdering()));
3369   return true;
3370 }
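// Illustrative example (editorial addition; exact MMO printing may differ by
// version): a strong compare-and-swap
//   %pair = cmpxchg ptr %p, i32 %cmp, i32 %new seq_cst seq_cst
// becomes roughly
//   %old:_(s32), %ok:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %p(p0), %cmp, %new
// where %old and %ok are the two vregs that getOrCreateVRegs(I) allocates for
// the { i32, i1 } result of the IR instruction.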

3371

3372 bool IRTranslator::translateAtomicRMW(const User &U,
3373                                       MachineIRBuilder &MIRBuilder) {
3374   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
3375   auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3376
3377   Register Res = getOrCreateVReg(I);
3378   Register Addr = getOrCreateVReg(*I.getPointerOperand());
3379   Register Val = getOrCreateVReg(*I.getValOperand());
3380
3381   unsigned Opcode = 0;
3382   switch (I.getOperation()) {
3383   default:
3384     return false;
3385   case AtomicRMWInst::Xchg:
3386     Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3387     break;
3388   case AtomicRMWInst::Add:
3389     Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3390     break;
3391   case AtomicRMWInst::Sub:
3392     Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3393     break;
3394   case AtomicRMWInst::And:
3395     Opcode = TargetOpcode::G_ATOMICRMW_AND;
3396     break;
3397   case AtomicRMWInst::Nand:
3398     Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3399     break;
3400   case AtomicRMWInst::Or:
3401     Opcode = TargetOpcode::G_ATOMICRMW_OR;
3402     break;
3403   case AtomicRMWInst::Xor:
3404     Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3405     break;
3406   case AtomicRMWInst::Max:
3407     Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3408     break;
3409   case AtomicRMWInst::Min:
3410     Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3411     break;
3412   case AtomicRMWInst::UMax:
3413     Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3414     break;
3415   case AtomicRMWInst::UMin:
3416     Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3417     break;
3418   case AtomicRMWInst::FAdd:
3419     Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3420     break;
3421   case AtomicRMWInst::FSub:
3422     Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3423     break;
3424   case AtomicRMWInst::FMax:
3425     Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3426     break;
3427   case AtomicRMWInst::FMin:
3428     Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3429     break;
3430   case AtomicRMWInst::UIncWrap:
3431     Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3432     break;
3433   case AtomicRMWInst::UDecWrap:
3434     Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3435     break;
3436   case AtomicRMWInst::USubCond:
3437     Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
3438     break;
3439   case AtomicRMWInst::USubSat:
3440     Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
3441     break;
3442   }
3443
3444   MIRBuilder.buildAtomicRMW(
3445       Opcode, Res, Addr, Val,
3446       *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3447           Flags, MRI->getType(Val), getMemOpAlign(I),
3448           I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3449           I.getOrdering()));
3450   return true;
3451 }
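// Illustrative example (editorial addition): the switch above is a direct
// one-to-one mapping, e.g.
//   %old = atomicrmw add ptr %p, i32 %x monotonic
// becomes roughly
//   %old:_(s32) = G_ATOMICRMW_ADD %p(p0), %x(s32)
// carrying a machine memory operand with the monotonic ordering; any
// operation without a G_ATOMICRMW_* counterpart falls back to SelectionDAG
// via the default: return false.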

3452

3453 bool IRTranslator::translateFence(const User &U,
3454                                   MachineIRBuilder &MIRBuilder) {
3455   const FenceInst &Fence = cast<FenceInst>(U);
3456   MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
3457                         Fence.getSyncScopeID());
3458   return true;
3459 }

3460

3461 bool IRTranslator::translateFreeze(const User &U,
3462                                    MachineIRBuilder &MIRBuilder) {
3463   const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
3464   const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
3465
3466   assert(DstRegs.size() == SrcRegs.size() &&
3467          "Freeze with different source and destination type?");
3468
3469   for (unsigned I = 0; I < DstRegs.size(); ++I) {
3470     MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
3471   }
3472
3473   return true;
3474 }

3475

3476 void IRTranslator::finishPendingPhis() {
3477 #ifndef NDEBUG
3478   DILocationVerifier Verifier;
3479   GISelObserverWrapper WrapperObserver(&Verifier);
3480   RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
3481 #endif // ifndef NDEBUG
3482   for (auto &Phi : PendingPHIs) {
3483     const PHINode *PI = Phi.first;
3484     if (PI->use_empty())
3485       continue;
3486     ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
3487     MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3488     EntryBuilder->setDebugLoc(PI->getDebugLoc());
3489 #ifndef NDEBUG
3490     Verifier.setCurrentInst(PI);
3491 #endif // ifndef NDEBUG
3492
3493     SmallSet<const MachineBasicBlock *, 16> SeenPreds;
3494     for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
3495       auto IRPred = PI->getIncomingBlock(i);
3496       ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
3497       for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
3498         if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
3499           continue;
3500         SeenPreds.insert(Pred);
3501         for (unsigned j = 0; j < ValRegs.size(); ++j) {
3502           MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3503           MIB.addUse(ValRegs[j]);
3504           MIB.addMBB(Pred);
3505         }
3506       }
3507     }
3508   }
3509 }

3510

3511 void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
3512                                            const DILocalVariable *Variable,
3513                                            const DIExpression *Expression,
3514                                            const DebugLoc &DL,
3515                                            MachineIRBuilder &MIRBuilder) {
3516   assert(Variable->isValidLocationForIntrinsic(DL) &&
3517          "Expected inlined-at fields to agree");
3518   // Act as if we're handling a debug intrinsic.
3519   MIRBuilder.setDebugLoc(DL);
3520
3521   if (!V || HasArgList) {
3522     // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
3523     // terminate any prior location.
3524     MIRBuilder.buildIndirectDbgValue(0, Variable, Expression);
3525     return;
3526   }
3527
3528   if (const auto *CI = dyn_cast<Constant>(V)) {
3529     MIRBuilder.buildConstDbgValue(*CI, Variable, Expression);
3530     return;
3531   }
3532
3533   if (auto *AI = dyn_cast<AllocaInst>(V);
3534       AI && AI->isStaticAlloca() && Expression->startsWithDeref()) {
3535     // If the value is an alloca and the expression starts with a
3536     // dereference, track a stack slot instead of a register, as registers
3537     // may be clobbered.
3538     auto ExprOperands = Expression->getElements();
3539     auto *ExprDerefRemoved =
3540         DIExpression::get(AI->getContext(), ExprOperands.drop_front());
3541     MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), Variable,
3542                                ExprDerefRemoved);
3543     return;
3544   }
3545   if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,
3546                                     MIRBuilder))
3547     return;
3548   for (Register Reg : getOrCreateVRegs(*V)) {
3549     // FIXME: This does not handle register-indirect values at offset 0. The
3550     // direct/indirect thing shouldn't really be handled by something as
3551     // implicit as reg+noreg vs reg+imm in the first place, but it seems
3552     // dangerous to change without checking how the backend uses it.
3553     MIRBuilder.buildDirectDbgValue(Reg, Variable, Expression);
3554   }
3555 }

3556

3557 void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
3558                                              const DILocalVariable *Variable,
3559                                              const DIExpression *Expression,
3560                                              const DebugLoc &DL,
3561                                              MachineIRBuilder &MIRBuilder) {
3562   if (!Address || isa<UndefValue>(Address)) {
3563     LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
3564     return;
3565   }
3566
3567   assert(Variable->isValidLocationForIntrinsic(DL) &&
3568          "Expected inlined-at fields to agree");
3569   auto AI = dyn_cast<AllocaInst>(Address);
3570   if (AI && AI->isStaticAlloca()) {
3571     // Static allocas are tracked at the MF level; no need for DBG_VALUE
3572     // instructions (in fact, they get ignored if they *do* exist).
3573     MF->setVariableDbgInfo(Variable, Expression,
3574                            getOrCreateFrameIndex(*AI), DL);
3575     return;
3576   }
3577
3578   if (translateIfEntryValueArgument(true, Address, Variable,
3579                                     Expression, DL,
3580                                     MIRBuilder))
3581     return;
3582
3583   // A dbg.declare describes the address of a source variable, so lower it
3584   // into an indirect DBG_VALUE.
3585   MIRBuilder.setDebugLoc(DL);
3586   MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
3587                                    Variable, Expression);
3588   return;
3589 }

3590

3591 void IRTranslator::translateDbgInfo(const Instruction &Inst,
3592                                     MachineIRBuilder &MIRBuilder) {
3593   for (DbgRecord &DR : Inst.getDbgRecordRange()) {
3594     if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
3595       MIRBuilder.setDebugLoc(DLR->getDebugLoc());
3596       assert(DLR->getLabel() && "Missing label");
3597       assert(DLR->getLabel()->isValidLocationForIntrinsic(
3598                  MIRBuilder.getDebugLoc()) &&
3599              "Expected inlined-at fields to agree");
3600       MIRBuilder.buildDbgLabel(DLR->getLabel());
3601       continue;
3602     }
3603     DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
3604     const DILocalVariable *Variable = DVR.getVariable();
3605     const DIExpression *Expression = DVR.getExpression();
3606     Value *V = DVR.getVariableLocationOp(0);
3607     if (DVR.isDbgDeclare())
3608       translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
3609                                 DVR.getDebugLoc(), MIRBuilder);
3610     else
3611       translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
3612                               DVR.getDebugLoc(), MIRBuilder);
3613   }
3614 }

3615

3616 bool IRTranslator::translate(const Instruction &Inst) {
3617   CurBuilder->setDebugLoc(Inst.getDebugLoc());
3618   CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
3619   CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));
3620
3621   if (TLI->fallBackToDAGISel(Inst))
3622     return false;
3623
3624   switch (Inst.getOpcode()) {
3625 #define HANDLE_INST(NUM, OPCODE, CLASS) \
3626   case Instruction::OPCODE: \
3627     return translate##OPCODE(Inst, *CurBuilder.get());
3628 #include "llvm/IR/Instruction.def"
3629   default:
3630     return false;
3631   }
3632 }

3633

3634 bool IRTranslator::translate(const Constant &C, Register Reg) {
3635   // We only emit constants into the entry block from here. To prevent jumpy
3636   // debug behaviour, remove the debug line.
3637   if (auto CurrInstDL = CurBuilder->getDL())
3638     EntryBuilder->setDebugLoc(DebugLoc());
3639
3640   if (auto CI = dyn_cast<ConstantInt>(&C))
3641     EntryBuilder->buildConstant(Reg, *CI);
3642   else if (auto CF = dyn_cast<ConstantFP>(&C))
3643     EntryBuilder->buildFConstant(Reg, *CF);
3644   else if (isa<UndefValue>(C))
3645     EntryBuilder->buildUndef(Reg);
3646   else if (isa<ConstantPointerNull>(C))
3647     EntryBuilder->buildConstant(Reg, 0);
3648   else if (auto GV = dyn_cast<GlobalValue>(&C))
3649     EntryBuilder->buildGlobalValue(Reg, GV);
3650   else if (auto CPA = dyn_cast<ConstantPtrAuth>(&C)) {
3651     Register Addr = getOrCreateVReg(*CPA->getPointer());
3652     Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3653     EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
3654   } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
3655     Constant &Elt = *CAZ->getElementValue(0u);
3656     if (isa<ScalableVectorType>(CAZ->getType())) {
3657       EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
3658       return true;
3659     }
3660     // Return the scalar if it is a <1 x Ty> vector.
3661     unsigned NumElts = CAZ->getElementCount().getFixedValue();
3662     if (NumElts == 1)
3663       return translateCopy(C, Elt, *EntryBuilder);
3664     // All elements are zero, so we can just use the first one.
3665     EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));
3666   } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
3667     // Return the scalar if it is a <1 x Ty> vector.
3668     if (CV->getNumElements() == 1)
3669       return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3670     SmallVector<Register, 4> Ops;
3671     for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3672       Constant &Elt = *CV->getElementAsConstant(i);
3673       Ops.push_back(getOrCreateVReg(Elt));
3674     }
3675     EntryBuilder->buildBuildVector(Reg, Ops);
3676   } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
3677     switch (CE->getOpcode()) {
3678 #define HANDLE_INST(NUM, OPCODE, CLASS) \
3679   case Instruction::OPCODE: \
3680     return translate##OPCODE(*CE, *EntryBuilder.get());
3681 #include "llvm/IR/Instruction.def"
3682     default:
3683       return false;
3684     }
3685   } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
3686     if (CV->getNumOperands() == 1)
3687       return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3688     SmallVector<Register, 4> Ops;
3689     for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3690       Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3691     }
3692     EntryBuilder->buildBuildVector(Reg, Ops);
3693   } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
3694     EntryBuilder->buildBlockAddress(Reg, BA);
3695   } else
3696     return false;
3697
3698   return true;
3699 }
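// Illustrative example (editorial addition): a constant vector operand such
// as <2 x i32> <i32 1, i32 2> is materialized in the entry block roughly as
//   %c1:_(s32) = G_CONSTANT i32 1
//   %c2:_(s32) = G_CONSTANT i32 2
//   %v:_(<2 x s32>) = G_BUILD_VECTOR %c1, %c2
// while a one-element vector constant degenerates to a plain copy of its
// scalar element (the getNumElements() == 1 / getNumOperands() == 1 cases
// above).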

3700

3701 bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3702                                       MachineBasicBlock &MBB) {
3703   for (auto &BTB : SL->BitTestCases) {

3704

3705 if (!BTB.Emitted)

3706 emitBitTestHeader(BTB, BTB.Parent);

3707
3708     BranchProbability UnhandledProb = BTB.Prob;
3709     for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {

3710 UnhandledProb -= BTB.Cases[j].ExtraProb;

3711       // Set the current basic block to the mbb we wish to insert the code into
3712       MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3713       // If all cases cover a contiguous range, it is not necessary to jump to
3714       // the default block after the last bit test fails. This is because fall
3715       // through already means that we have run out of options.
3716       //
3717       // We only need a jump to the default block when the range is not
3718       // contiguous and there are remaining cases; otherwise the next bit
3719       // test (or the fallthrough itself) already covers the remaining
3720       // possibilities.
3721       MachineBasicBlock *NextMBB;

3722 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {

3723

3724

3725 NextMBB = BTB.Cases[j + 1].TargetBB;

3726 } else if (j + 1 == ej) {

3727

3728 NextMBB = BTB.Default;

3729 } else {

3730

3731 NextMBB = BTB.Cases[j + 1].ThisBB;

3732 }

3733

3734 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

3735

3736 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {

3737

3738

3739

3740         addMachineCFGPred({BTB.Parent->getBasicBlock(),
3741                            BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3742                           BTB.Parent);
3743

3744 BTB.Cases.pop_back();

3745 break;

3746 }

3747 }

3748

3749

3750 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),

3751 BTB.Default->getBasicBlock()};

3752 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);

3753 if (!BTB.ContiguousRange) {

3754 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);

3755 }

3756 }

3757 SL->BitTestCases.clear();

3758

3759 for (auto &JTCase : SL->JTCases) {

3760

3761 if (!JTCase.first.Emitted)

3762 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

3763

3764 emitJumpTable(JTCase.second, JTCase.second.MBB);

3765 }

3766 SL->JTCases.clear();

3767

3768 for (auto &SwCase : SL->SwitchCases)

3769 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);

3770 SL->SwitchCases.clear();

3771

3772
3773   // Check if we need to generate stack-protector guard checks.
3774   if (SP->shouldEmitSDCheck(BB)) {
3775     bool FunctionBasedInstrumentation =
3776         TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
3777     SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3778   }
3779
3780   if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {

3781     LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3782     return false;
3783   } else if (SPDescriptor.shouldEmitStackProtector()) {
3784     MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3785     MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3786
3787     // Find the split point to split the parent mbb. At the same time we
3788     // can emit the spill at the beginning of the block.
3789     MachineBasicBlock::iterator SplitPoint =
3790         findSplitPointForStackProtector(ParentMBB, *TLI);
3791
3796     // Splice the terminator of ParentMBB into SuccessMBB.
3797     SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3798                        ParentMBB->end());
3799
3800     // Add compare/jump on neq/jump to the parent BB.
3801     if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3802       return false;
3803
3804     // CodeGen Failure MBB if we have not codegened it yet.
3805     MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3806     if (FailureMBB->empty()) {
3807       if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3808         return false;
3809     }
3810
3811     // Clear the per-BB state.
3812     SPDescriptor.resetPerBBState();
3813   }
3814   return true;
3815 }

3816
3817 bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
3818                                           MachineBasicBlock *ParentBB) {
3819   CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3820
3821   // First create the loads to the guard/stack slot for the comparison.
3822   Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
3823   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
3824   LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
3825
3826   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3827   int FI = MFI.getStackProtectorIndex();
3828
3829   Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3830   const Module &M = *ParentBB->getParent()->getFunction().getParent();
3831   Align Align = DL->getPrefTypeAlign(PointerType::getUnqual(M.getContext()));
3832
3833   // Generate code to load the content of the guard slot.
3834   Register GuardVal =
3835       CurBuilder
3836           ->buildLoad(PtrMemTy, StackSlotPtr,
3837                       MachinePointerInfo::getFixedStack(*MF, FI), Align,
3838                       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
3839           .getReg(0);
3840
3841   if (TLI->useStackGuardXorFP()) {
3842     LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
3843     return false;
3844   }

3845
3846   // Retrieve guard check function, nullptr if instrumentation is inlined.
3847   if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
3848     // This path is currently untestable on GlobalISel, so the ported code
3849     // below is kept under '#if 0' until it can be exercised; fall back to
3850     // SelectionDAG for now.
3851     // FIXME: implement and test the guard-check call on GlobalISel.
3853     (void)GuardCheckFn;
3854     return false;

3855 #if 0
3856     // The target provides a guard check function to validate the guard
3857     // value. Generate a call to that function with the content of the
3858     // guard slot as argument.
3859     FunctionType *FnTy = GuardCheckFn->getFunctionType();
3860     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3861     ISD::ArgFlagsTy Flags;
3862     if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3863       Flags.setInReg();
3864     CallLowering::ArgInfo GuardArgInfo(
3865         {GuardVal, FnTy->getParamType(0), {Flags}});
3866
3867     CallLowering::CallLoweringInfo Info;
3868     Info.OrigArgs.push_back(GuardArgInfo);
3869     Info.CallConv = GuardCheckFn->getCallingConv();
3870     Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
3871     Info.OrigRet = {Register(), FnTy->getReturnType()};
3872     if (!CLI->lowerCall(MIRBuilder, Info)) {
3873       LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3874       return false;
3875     }
3876     return true;
3877 #endif
3878   }

3879
3880   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3881   // Otherwise, emit a volatile load to retrieve the stack guard value.
3882   if (TLI->useLoadStackGuardNode()) {
3883     Guard =
3884         MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
3885     getStackGuard(Guard, *CurBuilder);
3886   } else {
3887     // TODO: test using android subtarget when we support @llvm.thread.pointer.
3888     const Value *IRGuard = TLI->getSDagStackGuard(M);
3889     Register GuardPtr = getOrCreateVReg(*IRGuard);
3890
3891     Guard = CurBuilder
3892                 ->buildLoad(PtrMemTy, GuardPtr,
3893                             MachinePointerInfo::getFixedStack(*MF, FI), Align,
3894                             MachineMemOperand::MOLoad |
3895                                 MachineMemOperand::MOVolatile)
3896                 .getReg(0);
3897   }
3898
3899   // Perform the comparison.
3900   auto Cmp =
3901       CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
3902   // If the guard/stackslot do not equal, branch to failure MBB.
3903   CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
3904   // Otherwise branch to success MBB.
3905   CurBuilder->buildBr(*SPD.getSuccessMBB());
3906   return true;
3907 }
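// Illustrative sketch (editorial addition) of the parent-block check emitted
// above, in rough gMIR form:
//   %slot:_(p0) = G_FRAME_INDEX <stack-protector slot>
//   %guardval   = G_LOAD %slot            ; volatile load of the slot
//   %guard      = <stack guard value>     ; e.g. via LOAD_STACK_GUARD
//   %ne:_(s1)   = G_ICMP intpred(ne), %guard, %guardval
//   G_BRCOND %ne, %bb.failure
//   G_BR %bb.success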

3908

3909 bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
3910                                            MachineBasicBlock *FailureBB) {
3911   CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
3912
3913   const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
3914   const char *Name = TLI->getLibcallName(Libcall);
3915
3916   CallLowering::CallLoweringInfo Info;
3917   Info.CallConv = TLI->getLibcallCallingConv(Libcall);
3918   Info.Callee = MachineOperand::CreateES(Name);
3919   Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
3920                   0};
3921   if (!CLI->lowerCall(*CurBuilder, Info)) {
3922     LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
3923     return false;
3924   }
3925
3926   // Emit a trap instruction if we are required to do so.
3927   const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
3928   if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
3929     CurBuilder->buildInstr(TargetOpcode::G_TRAP);
3930
3931   return true;
3932 }

3933

3934void IRTranslator::finalizeFunction() {

3935

3936

3937 PendingPHIs.clear();

3938 VMap.reset();

3939 FrameIndices.clear();

3940 MachinePreds.clear();

3941

3942

3943

3944 EntryBuilder.reset();

3945 CurBuilder.reset();

3946  FuncInfo.clear();
3947  SPDescriptor.resetPerFunctionState();
3948 }

3949

3950 /// Returns true if a BasicBlock \p BB within a variadic function contains a
3951 /// variadic musttail call.
3952 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3953   if (!IsVarArg)
3954     return false;
3955
3956   // Walk the block backwards, because tail calls usually only appear at the
3957   // end of a block.
3958   return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
3959     const auto *CI = dyn_cast<CallInst>(&I);
3960     return CI && CI->isMustTailCall();
3961   });
3962 }

3963

3964 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3965   MF = &CurMF;
3966   const Function &F = MF->getFunction();
3967   GISelCSEAnalysisWrapper &Wrapper =
3968       getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3969   // Set the CSEConfig and run the analysis.
3970   GISelCSEInfo *CSEInfo = nullptr;
3971   TPC = &getAnalysis<TargetPassConfig>();
3972   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3973                        ? EnableCSEInIRTranslator
3974                        : TPC->isGISelCSEEnabled();
3975   TLI = MF->getSubtarget().getTargetLowering();
3976
3977   if (EnableCSE) {
3978     EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3979     CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3980     EntryBuilder->setCSEInfo(CSEInfo);
3981     CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3982     CurBuilder->setCSEInfo(CSEInfo);
3983   } else {
3984     EntryBuilder = std::make_unique<MachineIRBuilder>();
3985     CurBuilder = std::make_unique<MachineIRBuilder>();
3986   }
3987   CLI = MF->getSubtarget().getCallLowering();
3988   CurBuilder->setMF(*MF);
3989   EntryBuilder->setMF(*MF);
3990   MRI = &MF->getRegInfo();
3991   DL = &F.getDataLayout();
3992   ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3993   const TargetMachine &TM = MF->getTarget();
3994   TM.resetTargetOptions(F);
3995   EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
3996   FuncInfo.MF = MF;
3997   if (EnableOpts) {
3998     AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3999     FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
4000   } else {
4001     AA = nullptr;
4002     FuncInfo.BPI = nullptr;
4003   }
4004
4005   AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
4006       MF->getFunction());
4007   LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
4008   FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
4009
4010   SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
4011   SL->init(*TLI, TM, *DL);

4012

4013 assert(PendingPHIs.empty() && "stale PHIs");

4014

4015   // Targets which want to use big endian can enable it using
4016   // enableBigEndian().
4017   if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
4018     // Currently we don't properly handle big endian code.
4019     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4020                                F.getSubprogram(), &F.getEntryBlock());
4021     R << "unable to translate in big endian mode";
4022     reportTranslationError(*MF, *TPC, *ORE, R);
4023     return false;
4024   }

4025

4026

4027 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

4028

4029   // Setup a separate basic-block for the arguments and constants.
4030   MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
4031   MF->push_back(EntryBB);
4032   EntryBuilder->setMBB(*EntryBB);
4033
4034   DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
4035   SwiftError.setFunction(CurMF);
4036   SwiftError.createEntriesInEntryBlock(DbgLoc);
4037
4038   bool IsVarArg = F.isVarArg();
4039   bool HasMustTailInVarArgFn = false;
4040
4041   // Create all blocks, in IR order, to preserve the layout.
4042   FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
4043   for (const BasicBlock &BB: F) {
4044     auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];
4045
4046     MBB = MF->CreateMachineBasicBlock(&BB);
4047     MF->push_back(MBB);
4048
4049     if (BB.hasAddressTaken())
4050       MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));
4051
4052     if (!HasMustTailInVarArgFn)
4053       HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
4054   }
4055
4056   MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
4057
4058   // Make our arguments/constants entry block fallthrough to the IR entry block.
4059   EntryBB->addSuccessor(&getMBB(F.front()));

4060

4061   if (CLI->fallBackToDAGISel(*MF)) {
4062     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4063                                F.getSubprogram(), &F.getEntryBlock());
4064     R << "unable to lower function: "
4065       << ore::NV("Prototype", F.getFunctionType());
4066     reportTranslationError(*MF, *TPC, *ORE, R);
4067     return false;
4068   }

4069

4070   // Lower the actual args into this basic block.
4071   SmallVector<ArrayRef<Register>, 8> VRegArgs;
4072   for (const Argument &Arg: F.args()) {
4073     if (DL->getTypeStoreSize(Arg.getType()).isZero())
4074       continue; // Don't handle zero sized types.
4075     ArrayRef<Register> VRegs = allocateVRegs(Arg);
4076     VRegArgs.push_back(VRegs);
4077
4078     if (Arg.hasSwiftErrorAttr()) {
4079       assert(VRegs.size() == 1 && "Too many vregs for Swift error");
4080       SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
4081     }
4082   }

4083

4084   if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
4085     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4086                                F.getSubprogram(), &F.getEntryBlock());
4087     R << "unable to lower arguments: "
4088       << ore::NV("Prototype", F.getFunctionType());
4089     reportTranslationError(*MF, *TPC, *ORE, R);
4090     return false;
4091   }

4092

4093   // Need to visit defs before uses when translating instructions.
4094   GISelObserverWrapper WrapperObserver;
4095   if (EnableCSE && CSEInfo)
4096     WrapperObserver.addObserver(CSEInfo);
4097   {
4098     ReversePostOrderTraversal<const Function *> RPOT(&F);
4099 #ifndef NDEBUG
4100     DILocationVerifier Verifier;
4101     WrapperObserver.addObserver(&Verifier);
4102 #endif // ifndef NDEBUG
4103     RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
4104     for (const BasicBlock *BB : RPOT) {
4105       MachineBasicBlock &MBB = getMBB(*BB);
4106       // Set the insertion point of all the following translations to
4107       // the end of this basic block.
4108       CurBuilder->setMBB(MBB);
4109       HasTailCall = false;
4110       for (const Instruction &Inst : *BB) {
4111         // If we translated a tail call in the last step, then we know
4112         // everything after the call is either a return, or something that is
4113         // handled by the call itself. (E.g. a lifetime marker or assume
4114         // intrinsic call.)
4115         //
4116         if (HasTailCall)
4117           break;
4118 #ifndef NDEBUG
4119         Verifier.setCurrentInst(&Inst);
4120 #endif // ifndef NDEBUG
4121
4122         // Translate any debug-info attached to the instruction.
4123         translateDbgInfo(Inst, *CurBuilder);
4124
4125         if (translate(Inst))
4126           continue;
4127
4128         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4129                                    Inst.getDebugLoc(), BB);
4130         R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
4131
4132         if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
4133           std::string InstStrStorage;
4134           raw_string_ostream InstStr(InstStrStorage);
4135           InstStr << Inst;
4136
4137           R << ": '" << InstStrStorage << "'";
4138         }
4139
4140         reportTranslationError(*MF, *TPC, *ORE, R);
4141         return false;
4142       }
4143
4144       if (!finalizeBasicBlock(*BB, MBB)) {
4145         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4146                                    BB->getTerminator()->getDebugLoc(), BB);
4147         R << "unable to translate basic block";
4148         reportTranslationError(*MF, *TPC, *ORE, R);
4149         return false;
4150       }
4151     }
4152 #ifndef NDEBUG
4153     WrapperObserver.removeObserver(&Verifier);
4154 #endif
4155   }

4156

4157   finishPendingPhis();
4158
4159   SwiftError.propagateVRegs();
4160
4161   // Merge the argument lowering and constants block with its single
4162   // successor, the LLVM-IR entry block. We want the basic block to
4163   // be maximal.
4164   assert(EntryBB->succ_size() == 1 &&
4165          "Custom BB used for lowering should have only one successor");
4166   // Get the successor of the current entry block.
4167   MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
4168   assert(NewEntryBB.pred_size() == 1 &&
4169          "LLVM-IR entry block has a predecessor!?");
4170   // Move all the instruction from the current entry block to the
4171   // new entry block.
4172   NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
4173                     EntryBB->end());
4174
4175   // Update the live-in information of the new entry block.
4176   for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
4177     NewEntryBB.addLiveIn(LiveIn.PhysReg);
4178   NewEntryBB.sortUniqueLiveIns();
4179
4180   // Get rid of the now empty basic block.
4181   EntryBB->removeSuccessor(&NewEntryBB);
4182   MF->remove(EntryBB);
4183   MF->deleteMachineBasicBlock(EntryBB);
4184
4185   assert(&MF->front() == &NewEntryBB &&
4186          "New entry wasn't next in the list of basic block!");
4187
4188   // Initialize stack protector information.
4189   StackProtector &SP = getAnalysis<StackProtector>();
4190   SP.copyToMachineFrameInfo(MF->getFrameInfo());
4191
4192   return false;
4193 }

amdgpu aa AMDGPU Address space based Alias Analysis Wrapper

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

Analysis containing CSE Info

Provides analysis for continuously CSEing during GISel passes.

This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.

This file describes how to lower LLVM calls to machine code calls.

This file contains the declarations for the subclasses of Constant, which represent the different fla...

Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx

This contains common code to allow clients to notify changes to machine instr.

const HexagonInstrInfo * TII

IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, const TargetPassConfig &TPC, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)

static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)

Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.

static unsigned getConvOpcode(Intrinsic::ID ID)

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)

static unsigned getConstrainedOpcode(Intrinsic::ID ID)

static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))

static bool isValInBlock(const Value *V, const BasicBlock *BB)

static bool isSwiftError(const Value *V)

This file declares the IRTranslator pass.

This file provides various utilities for inspecting and working with the control flow graph in LLVM I...

This file describes how to lower LLVM inline asm to machine code INLINEASM.

Legalize the Machine IR a function s Machine IR

Implement a low-level type suitable for MachineInstr level instruction selection.

Implement a low-level type suitable for MachineInstr level instruction selection.

This file declares the MachineIRBuilder class.

unsigned const TargetRegisterInfo * TRI

#define INITIALIZE_PASS_DEPENDENCY(depName)

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)

This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.

const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB

const SmallVectorImpl< MachineOperand > & Cond

assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())

verify safepoint Safepoint IR Verifier

This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...

This file defines the SmallSet class.

This file defines the SmallVector class.

This file describes how to lower LLVM code to machine code.

Target-Independent Code Generator Pass Configuration Options pass.

A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.

bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)

Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...

Class for arbitrary precision integers.

APInt zextOrTrunc(unsigned width) const

Zero extend or truncate to width.

an instruction to allocate memory on the stack

bool isSwiftError() const

Return true if this alloca is used as a swifterror argument to a call.

bool isStaticAlloca() const

Return true if this alloca is in the entry block of the function and is a constant size.

Align getAlign() const

Return the alignment of the memory that is being allocated by the instruction.

PointerType * getType() const

Overload to return most specific pointer type.

Type * getAllocatedType() const

Return the type that is being allocated by the instruction.

const Value * getArraySize() const

Get the number of elements allocated.

Represent the analysis usage information of a pass.

AnalysisUsage & addRequired()

AnalysisUsage & addPreserved()

Add the specified Pass class to the set of analyses preserved by this pass.

This class represents an incoming formal argument to a Function.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

An immutable pass that tracks lazily created AssumptionCache objects.

An instruction that atomically checks whether a specified value is in a memory location,...

an instruction that atomically reads a memory location, combines it with another value,...

@ USubCond

Subtract only if no unsigned overflow.

@ Min

*p = old <signed v ? old : v

@ USubSat

*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.

@ UIncWrap

Increment one up to a maximum value.

@ Max

*p = old >signed v ? old : v

@ UMin

*p = old <unsigned v ? old : v

@ FMin

*p = minnum(old, v) minnum matches the behavior of llvm.minnum.

@ UMax

*p = old >unsigned v ? old : v

@ FMax

*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.

@ UDecWrap

Decrement one until a minimum value or zero.

Attribute getFnAttr(Attribute::AttrKind Kind) const

Return the attribute object that exists for the function.

StringRef getValueAsString() const

Return the attribute's value as a string.

LLVM Basic Block Representation.

unsigned getNumber() const

bool hasAddressTaken() const

Returns true if there are any uses of this basic block other than direct branches,...

const Instruction * getFirstNonPHI() const

Returns a pointer to the first instruction in this block that is not a PHINode instruction.

const Function * getParent() const

Return the enclosing method, or null if none.

const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const

Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...

const Instruction & back() const

const Module * getModule() const

Return the module owning the function this basic block belongs to, or nullptr if the function does no...

Legacy analysis pass which computes BlockFrequencyInfo.

Conditional or Unconditional Branch instruction.

BasicBlock * getSuccessor(unsigned i) const

bool isUnconditional() const

Value * getCondition() const

Legacy analysis pass which computes BranchProbabilityInfo.

Analysis providing branch probability information.

BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const

Get an edge's probability, relative to other out-edges of the Src.

static BranchProbability getZero()

static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

bool isInlineAsm() const

Check if this call is an inline asm statement.

std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const

Return an operand bundle by name, if present.

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signa...

bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

unsigned countOperandBundlesOfType(StringRef Name) const

Return the number of operand bundles with the tag Name attached to this instruction.

Value * getCalledOperand() const

Value * getArgOperand(unsigned i) const

User::op_iterator arg_end()

Return the iterator pointing to the end of the argument list.

bool isConvergent() const

Determine if the invoke is convergent.

Intrinsic::ID getIntrinsicID() const

Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...

iterator_range< User::op_iterator > args()

Iteration adapter for range-for loops.

unsigned arg_size() const

AttributeList getAttributes() const

Return the attributes for this call.

This class represents a function call, abstracting a target machine's calling convention.

bool checkReturnTypeForCallConv(MachineFunction &MF) const

Toplevel function to check the return type based on the target calling convention.

virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const

This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...

virtual bool enableBigEndian() const

For targets which want to use big-endian can enable it with enableBigEndian() hook.

virtual bool supportSwiftError() const

virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const

This hook must be implemented to lower outgoing return values, described by Val, into the specified v...

virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const

This hook must be implemented to lower the given call instruction, including argument and return valu...

virtual bool fallBackToDAGISel(const MachineFunction &MF) const

This class is the base class for the comparison instructions.

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

@ FCMP_TRUE

1 1 1 1 Always true (always folded)

@ ICMP_SLT

signed less than

@ ICMP_SLE

signed less or equal

@ ICMP_UGT

unsigned greater than

@ ICMP_ULE

unsigned less or equal

@ FCMP_FALSE

0 0 0 0 Always false (always folded)

bool isFPPredicate() const

bool isIntPredicate() const

This is the shared class of boolean and integer constants.

static ConstantInt * getTrue(LLVMContext &Context)

bool isZero() const

This is just a convenience method to make client code smaller for a common code.

unsigned getBitWidth() const

getBitWidth - Return the scalar bitwidth of this constant.

uint64_t getZExtValue() const

Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...

const APInt & getValue() const

Return the constant as an APInt value reference.

This is an important base class in LLVM.

static Constant * getAllOnesValue(Type *Ty)

static Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

This is the common base class for constrained floating point intrinsics.

std::optional< fp::ExceptionBehavior > getExceptionBehavior() const

unsigned getNonMetadataArgCount() const

bool isEntryValue() const

Check if the expression consists of exactly one entry value operand.

static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)

Append the opcodes Ops to DIExpr.

bool isValidLocationForIntrinsic(const DILocation *DL) const

Check that a location is valid for this label.

bool isValidLocationForIntrinsic(const DILocation *DL) const

Check that a location is valid for this variable.

This class represents an Operation in the Expression.

A parsed version of the target data layout string in and methods for querying it.

unsigned getPointerSizeInBits(unsigned AS=0) const

Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...

const StructLayout * getStructLayout(StructType *Ty) const

Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...

IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const

Returns the type of a GEP index in AddressSpace.

TypeSize getTypeAllocSize(Type *Ty) const

Returns the offset in bytes between successive objects of the specified type, including alignment pad...

TypeSize getTypeSizeInBits(Type *Ty) const

Size examples:

TypeSize getTypeStoreSize(Type *Ty) const

Returns the maximum number of bytes that may be overwritten by storing the specified type.

Align getPointerABIAlignment(unsigned AS) const

Layout pointer alignment.

This represents the llvm.dbg.declare instruction.

Value * getAddress() const

This represents the llvm.dbg.label instruction.

DILabel * getLabel() const

Records a position in IR for a source label (DILabel).

Base class for non-instruction debug metadata records that have positions within IR.

DebugLoc getDebugLoc() const

This represents the llvm.dbg.value instruction.

Value * getValue(unsigned OpIdx=0) const

DILocalVariable * getVariable() const

DIExpression * getExpression() const

Record of a variable value-assignment, aka a non instruction representation of the dbg....

DIExpression * getExpression() const

Value * getVariableLocationOp(unsigned OpIdx) const

DILocalVariable * getVariable() const

bool isDbgDeclare() const

Class representing an expression and its matching format.

This instruction compares its operands according to the predicate given to the constructor.

An instruction for ordering other memory operations.

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this fence instruction.

AtomicOrdering getOrdering() const

Returns the ordering constraint of this fence instruction.

Class to represent fixed width SIMD vectors.

static FixedVectorType * get(Type *ElementType, unsigned NumElts)

BranchProbabilityInfo * BPI

void clear()

clear - Clear out all the function-specific state.

MachineBasicBlock * getMBB(const BasicBlock *BB) const

SmallVector< MachineBasicBlock * > MBBMap

A mapping from LLVM basic block number to their machine block.

bool CanLowerReturn

CanLowerReturn - true iff the function's return value can be lowered to registers.

bool skipFunction(const Function &F) const

Optional passes call this function to check whether the pass should be skipped.

const BasicBlock & getEntryBlock() const

DISubprogram * getSubprogram() const

Get the attached subprogram.

bool hasMinSize() const

Optimize this function for minimum size (-Oz).

Constant * getPersonalityFn() const

Get the personality function associated with this function.

const Function & getFunction() const

bool isIntrinsic() const

isIntrinsic - Returns true if the function's name starts with "llvm.".

bool hasOptNone() const

Do not optimize this function (-O0).

LLVMContext & getContext() const

getContext - Return a reference to the LLVMContext associated with this function.

The actual analysis pass wrapper.

Simple wrapper that does the following.

Abstract class that contains various methods for clients to notify about changes.

Simple wrapper observer that takes several observers, and calls each one for each event.

void removeObserver(GISelChangeObserver *O)

void addObserver(GISelChangeObserver *O)

static StringRef dropLLVMManglingEscape(StringRef Name)

If the given string begins with the GlobalValue name mangling escape character '\1',...

bool hasExternalWeakLinkage() const

bool hasDLLImportStorageClass() const

Module * getParent()

Get the module that this global value is contained inside of...

bool isTailCall(const MachineInstr &MI) const override

This instruction compares its operands according to the predicate given to the constructor.

bool runOnMachineFunction(MachineFunction &MF) override

runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...

IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...

Indirect Branch Instruction.

bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const

Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...

This instruction inserts a struct field of array element value into an aggregate value.

iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const

Return a range over the DbgRecords attached to this instruction.

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

const Module * getModule() const

Return the module owning the function this instruction belongs to or nullptr it the function does not...

bool hasMetadata() const

Return true if this instruction has any metadata attached to it.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

AAMDNodes getAAMetadata() const

Returns the AA metadata for this instruction.

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

bool hasAllowReassoc() const LLVM_READONLY

Determine whether the allow-reassociation flag is set.

Intrinsic::ID getIntrinsicID() const

Return the intrinsic ID of this intrinsic.

constexpr LLT changeElementType(LLT NewEltTy) const

If this type is a vector, return a vector with the same number of elements but the new element type.

static constexpr LLT scalar(unsigned SizeInBits)

Get a low-level scalar or aggregate "bag of bits".

constexpr uint16_t getNumElements() const

Returns the number of elements in a vector LLT.

constexpr bool isVector() const

static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)

Get a low-level pointer in the given address space.

constexpr TypeSize getSizeInBits() const

Returns the total size of the type. Must only be called on sized types.

constexpr bool isPointer() const

constexpr LLT getElementType() const

Returns the vector's element type. Only valid for vector types.

static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)

Get a low-level fixed-width vector of some number of elements and element width.

constexpr bool isFixedVector() const

Returns true if the LLT is a fixed vector.

The landingpad instruction holds all of the information necessary to generate correct exception handl...

An instruction for reading from memory.

Value * getPointerOperand()

AtomicOrdering getOrdering() const

Returns the ordering constraint of this load instruction.

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this load instruction.

static LocationSize precise(uint64_t Value)

Context object for machine code objects.

MCSymbol * createTempSymbol()

Create a temporary symbol with a unique name.

MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)

Gets a symbol that will be defined to the final stack offset of a local variable after codegen.

MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...

static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)

TypeSize getSizeInBits() const

Returns the size of the specified MVT in bits.

unsigned pred_size() const

void normalizeSuccProbs()

Normalize probabilities of all successors so that the sum of them becomes one.

void setAddressTakenIRBlock(BasicBlock *BB)

Set this block to reflect that it corresponds to an IR-level basic block with a BlockAddress.

instr_iterator insert(instr_iterator I, MachineInstr *M)

Insert MI into the instruction list before I, possibly inside a bundle.

const BasicBlock * getBasicBlock() const

Return the LLVM basic block that this instance corresponded to originally.

void setSuccProbability(succ_iterator I, BranchProbability Prob)

Set successor probability of a given iterator.

succ_iterator succ_begin()

void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())

Add Succ as a successor of this MachineBasicBlock.

SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator

void sortUniqueLiveIns()

Sorts and uniques the LiveIns vector.

bool isPredecessor(const MachineBasicBlock *MBB) const

Return true if the specified MBB is a predecessor of this block.

void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())

Adds the specified register as a live in.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

void splice(iterator Where, MachineBasicBlock *Other, iterator From)

Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...

void setIsEHPad(bool V=true)

Indicates the block is a landing pad.

The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.

bool hasVarSizedObjects() const

This method may be called any time after instruction selection is complete to determine if the stack ...

int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)

Create a new statically sized stack object, returning a nonnegative identifier to represent it.

int getStackProtectorIndex() const

Return the index for the stack protector object.

void setStackProtectorIndex(int I)

int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca)

Notify the MachineFrameInfo object that a variable sized object has been created.

void setHasMustTailInVarArgFunc(bool B)

MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.

ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

StringRef getName() const

getName - Return the name of the corresponding LLVM function.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

unsigned getTypeIDFor(const GlobalValue *TI)

Return the type id for the specified typeinfo. This is function wide.

void push_back(MachineBasicBlock *MBB)

void setHasFakeUses(bool V)

MCContext & getContext() const

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)

Add a new panding pad, and extract the exception handling information from the landingpad instruction...

void deleteMachineBasicBlock(MachineBasicBlock *MBB)

DeleteMachineBasicBlock - Delete the given MachineBasicBlock.

Function & getFunction()

Return the LLVM function that this machine code represents.

void remove(iterator MBBI)

void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, int Slot, const DILocation *Loc)

Collect information used to emit debugging information of a variable in a stack slot.

const MachineBasicBlock & front() const

void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)

Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...

MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)

CreateMachineBasicBlock - Allocate a new MachineBasicBlock.

void erase(iterator MBBI)

void insert(iterator MBBI, MachineBasicBlock *MBB)

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with

Helper class to build MachineInstr.

MachineInstrBuilder buildFPTOUI_SAT(const DstOp &Dst, const SrcOp &Src0)

Build and insert Res = G_FPTOUI_SAT Src0.

MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)

Build and insert Dst = G_FREEZE Src.

MachineInstrBuilder buildBr(MachineBasicBlock &Dest)

Build and insert G_BR Dest.

std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)

Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)

MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_ADD Op0, Op1.

MachineInstrBuilder buildUndef(const DstOp &Res)

Build and insert Res = IMPLICIT_DEF.

MachineInstrBuilder buildResetFPMode()

Build and insert G_RESET_FPMODE.

MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FPEXT Op.

MachineInstrBuilder buildFPTOSI_SAT(const DstOp &Dst, const SrcOp &Src0)

Build and insert Res = G_FPTOSI_SAT Src0.

MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)

Build and insert a Res = G_UCMP Op0, Op1.

MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)

Build and insert Res = G_JUMP_TABLE JTI.

MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)

Build and insert a Res = G_SCMP Op0, Op1.

MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)

Build and insert G_FENCE Ordering, Scope.

MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert a Res = G_SELECT Tst, Op0, Op1.

MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FMA Op0, Op1, Op2.

MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_MUL Op0, Op1.

MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)

Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.

MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)

Build and insert Res = G_AND Op0, Op1.

MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)

Build and insert an appropriate cast between two registers of equal size.

MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert a Res = G_ICMP Pred, Op0, Op1.

MachineBasicBlock::iterator getInsertPt()

Current insertion point for new instructions.

MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)

Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...

MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)

Build and insert OldValRes = G_ATOMICRMW_ Addr, Val, MMO.

MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_SUB Op0, Op1.

MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)

Build and insert a G_INTRINSIC instruction.

MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)

Build and insert Res = G_VSCALE MinElts.

MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)

Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.

MachineInstrBuilder buildSetFPMode(const SrcOp &Src)

Build and insert G_SET_FPMODE Src.

MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...

MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...

MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)

Build and insert G_BRCOND Tst, Dest.

MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)

Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.

MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert Res = G_LOAD Addr, MMO.

MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_PTR_ADD Op0, Op1.

MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)

Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...

MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)

Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.

MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert G_STORE Val, Addr, MMO.

MachineInstrBuilder buildInstr(unsigned Opcode)

Build and insert = Opcode .

MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)

Build and insert Res = G_FRAME_INDEX Idx.

MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...

MachineInstrBuilder buildDbgLabel(const MDNode *Label)

Build and insert a DBG_LABEL instructions specifying that Label is given.

MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)

Build and insert G_BRJT TablePtr, JTI, IndexReg.

MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)

Build and insert Res = G_DYN_STACKALLOC Size, Align.

MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...

MachineInstrBuilder buildResetFPEnv()

Build and insert G_RESET_FPENV.

void setDebugLoc(const DebugLoc &DL)

Set the debug location to DL for all the next build instructions.

const MachineBasicBlock & getMBB() const

Getter for the basic block we currently build.

MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)

Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.

MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)

Build and insert OldValRes, SuccessRes = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...

void setMBB(MachineBasicBlock &MBB)

Set the insertion point to the end of MBB.

const DebugLoc & getDebugLoc()

Get the current instruction's debug location.

MachineInstrBuilder buildTrap(bool Debug=false)

Build and insert G_TRAP or G_DEBUGTRAP.

MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)

Build and insert Fract, Exp = G_FFREXP Src.

MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FPTRUNC Op.

MachineInstrBuilder buildFSincos(const DstOp &Sin, const DstOp &Cos, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)

Build and insert Sin, Cos = G_FSINCOS Src.

MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)

Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.

MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)

Build but don't insert = Opcode .

MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)

Build and insert Res = COPY Op.

MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)

Build and insert G_PREFETCH Addr, RW, Locality, CacheType.

MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)

Build and insert Res = G_EXTRACT_SUBVECTOR Src, Idx0.

const DataLayout & getDataLayout() const

MachineInstrBuilder buildBrIndirect(Register Tgt)

Build and insert G_BRINDIRECT Tgt.

MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)

Build and insert Res = G_SPLAT_VECTOR Val.

MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)

Build and insert Res = G_STEP_VECTOR Step.

virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)

Build and insert Res = G_CONSTANT Val.

MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert a Res = G_FCMP PredOp0, Op1.

MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FADD Op0, Op1.

MachineInstrBuilder buildSetFPEnv(const SrcOp &Src)

Build and insert G_SET_FPENV Src.

Register getReg(unsigned Idx) const

Get the register for the operand index.

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & addMetadata(const MDNode *MD) const

const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const

const MachineInstrBuilder & addFrameIndex(int Idx) const

const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const

const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const

const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register use operand.

const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const

MachineInstr * getInstr() const

If conversion operators fail, use this method to get the MachineInstr explicitly.

const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register definition operand.
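
A hedged sketch of the operand-chaining style above (addDef, addFrameIndex): materializing the address of a stack slot with G_FRAME_INDEX. The 64-bit pointer width is an assumption for illustration; headers elided.

// Build `PtrReg = G_FRAME_INDEX FI` operand by operand.
static Register materializeFrameAddress(MachineIRBuilder &MIRBuilder,
                                        MachineRegisterInfo &MRI, int FI) {
  Register PtrReg =
      MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
  MIRBuilder.buildInstr(TargetOpcode::G_FRAME_INDEX)
      .addDef(PtrReg)       // result operand comes first
      .addFrameIndex(FI);   // then the frame-index source
  return PtrReg;
}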

Representation of each machine instruction.

void copyIRFlags(const Instruction &I)

Copy all flags to MachineInstr MIFlags.

static uint32_t copyFlagsFromInstruction(const Instruction &I)

const MachineOperand & getOperand(unsigned i) const

A description of a memory reference used in the backend.

Flags

Flags values. These may be or'd together.

@ MOVolatile

The memory access is volatile.

@ MODereferenceable

The memory access is dereferenceable (i.e., doesn't trap).

@ MOLoad

The memory access reads data.

@ MOInvariant

The memory access always returns the same value (or traps).

@ MOStore

The memory access writes data.

static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)

Register getReg() const

getReg - Returns the register number.

static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)

MachineInstr * getVRegDef(Register Reg) const

getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...

LLT getType(Register Reg) const

Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.

void setRegClass(Register Reg, const TargetRegisterClass *RC)

setRegClass - Set the register class of the specified virtual register.

Register createGenericVirtualRegister(LLT Ty, StringRef Name="")

Create and return a new generic virtual register with low-level type Ty.

void addPhysRegsUsedFromRegMask(const uint32_t *RegMask)

addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
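
A small sketch combining the MachineRegisterInfo accessors above; `MRI` and `Reg` are assumed to come from the function being translated, and headers are elided.

// True when Reg is a generic vreg whose unique definition is a COPY.
static bool definedByCopy(const MachineRegisterInfo &MRI, Register Reg) {
  LLT Ty = MRI.getType(Reg);     // LLT{} for non-generic vregs
  if (!Ty.isValid())
    return false;
  const MachineInstr *DefMI = MRI.getVRegDef(Reg);
  return DefMI && DefMI->getOpcode() == TargetOpcode::COPY;
}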

Representation for a specific memory location.

A Module instance is used to store all the information related to an LLVM module.

BasicBlock * getIncomingBlock(unsigned i) const

Return incoming basic block number i.

Value * getIncomingValue(unsigned i) const

Return incoming value number i.

unsigned getNumIncomingValues() const

Return the number of incoming edges.
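
A sketch of walking a PHI's (incoming block, incoming value) pairs with the three accessors above; `PN` is any llvm::PHINode, and headers are elided.

// Return the value PN receives along the edge from Pred, if any.
static const Value *incomingFor(const PHINode &PN, const BasicBlock *Pred) {
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
    if (PN.getIncomingBlock(i) == Pred)
      return PN.getIncomingValue(i);
  return nullptr;    // Pred is not a predecessor of PN's block
}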

static PointerType * getUnqual(Type *ElementType)

This constructs a pointer to an object of the specified type in the default address space (address sp...

Class to install both of the above.

Wrapper class representing virtual and physical registers.

MCRegister asMCReg() const

Utility to check-convert this value to a MCRegister.

Return a value (possibly void), from a function.

Value * getReturnValue() const

Convenience accessor. Returns null if there is no return value.

This class represents the LLVM 'select' instruction.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...

size_type count(const T &V) const

count - Return 1 if the element is in the set, 0 otherwise.

std::pair< const_iterator, bool > insert(const T &V)

insert - Insert an element into the set if it isn't already there.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
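
A sketch of the small-container idiom these classes enable: SmallPtrSet::insert reports whether the element was new, so deduplicate-and-collect needs no separate lookup.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Value.h"

static void collectUnique(llvm::ArrayRef<const llvm::Value *> In,
                          llvm::SmallVectorImpl<const llvm::Value *> &Out) {
  llvm::SmallPtrSet<const llvm::Value *, 8> Seen;
  for (const llvm::Value *V : In)
    if (Seen.insert(V).second)   // .second: V was not in the set yet
      Out.push_back(V);
}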

Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...

void initialize(const BasicBlock *BB, MachineBasicBlock *MBB, bool FunctionBasedInstrumentation)

Initialize the stack protector descriptor structure for a new basic block.

MachineBasicBlock * getSuccessMBB()

void resetPerBBState()

Reset state that changes when we handle different basic blocks.

void resetPerFunctionState()

Reset state that only changes when we switch functions.

MachineBasicBlock * getFailureMBB()

MachineBasicBlock * getParentMBB()

bool shouldEmitStackProtector() const

Returns true if all fields of the stack protector descriptor are initialized implying that we should/...

bool shouldEmitFunctionBasedCheckStackProtector() const

bool shouldEmitSDCheck(const BasicBlock &BB) const

void copyToMachineFrameInfo(MachineFrameInfo &MFI) const
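
A minimal sketch of the per-block lifecycle these accessors suggest, assuming `SPD` is the pass's StackProtectorDescriptor and `BB`/`MBB` are the IR and machine blocks being finished; this mirrors the API, not the exact pass code, and headers are elided.

static void finishBlock(StackProtectorDescriptor &SPD,
                        const BasicBlock &BB, MachineBasicBlock &MBB) {
  if (SPD.shouldEmitSDCheck(BB))
    SPD.initialize(&BB, &MBB, /*FunctionBasedInstrumentation=*/false);
  if (SPD.shouldEmitStackProtector()) {
    MachineBasicBlock *Success = SPD.getSuccessMBB();
    MachineBasicBlock *Failure = SPD.getFailureMBB();
    (void)Success;    // emit the guard load/compare into these blocks
    (void)Failure;    // and branch to Failure on mismatch
  }
  SPD.resetPerBBState();    // per-block state must not leak
}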

An instruction for storing to memory.

StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.

constexpr bool empty() const

empty - Check if the string is empty.

constexpr const char * data() const

data - Get a pointer to the start of the string (which may not be null terminated).

TypeSize getElementOffset(unsigned Idx) const

Class to represent struct types.

bool createEntriesInEntryBlock(DebugLoc DbgLoc)

Create initial definitions of swifterror values in the entry block of the current function.

void setFunction(MachineFunction &MF)

Initialize data structures for specified new function.

void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)

Set the swifterror virtual register in the VRegDefMap for this basic block.

Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)

Get or create the swifterror value virtual register for a use of a swifterror by an instruction.

Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)

Get or create the swifterror value virtual register for a def of a swifterror by an instruction.

const Value * getFunctionArg() const

Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...

void propagateVRegs()

Propagate assigned swifterror vregs through a function, synthesizing PHI nodes when needed to maintai...
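
A minimal sketch of the call order this interface implies; the per-instruction getOrCreateVRegUseAt/getOrCreateVRegDefAt calls made during body translation are elided.

#include "llvm/CodeGen/SwiftErrorValueTracking.h"

static void trackSwiftError(llvm::SwiftErrorValueTracking &SwiftError,
                            llvm::MachineFunction &MF,
                            llvm::DebugLoc EntryDL) {
  SwiftError.setFunction(MF);                  // per-function reset
  if (SwiftError.getFunctionArg())             // function uses swifterror
    SwiftError.createEntriesInEntryBlock(EntryDL);
  // ... translate the body, recording swifterror uses and defs ...
  SwiftError.propagateVRegs();                 // synthesize PHIs last
}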

Align getStackAlign() const

getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...

TargetInstrInfo - Interface to description of machine instruction set.

virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const

Return true if an FMA operation is faster than a pair of fmul and fadd instructions.

virtual unsigned getVaListSizeInBits(const DataLayout &DL) const

Returns the size of the platform's va_list object.

EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const

Return the EVT corresponding to this LLVM type.

CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const

Get the CallingConv that should be used for the specified libcall.

virtual bool useStackGuardXorFP() const

If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...

virtual MVT getVectorIdxTy(const DataLayout &DL) const

Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...

const TargetMachine & getTargetMachine() const

virtual Value * getSDagStackGuard(const Module &M) const

Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...

bool isJumpExpensive() const

Return true if Flow Control is an expensive operation that should be avoided.

virtual Function * getSSPStackGuardCheck(const Module &M) const

If the target has a standard stack protection check function that performs validation and error handl...

MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const

virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const

Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...

MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const

MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
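
A sketch tying the flag hooks above to MachineMemOperand creation; the fixed stack slot FI and the LLT-based getMachineMemOperand overload are assumptions for illustration, and headers are elided.

static MachineMemOperand *stackSlotMMO(MachineFunction &MF,
                                       const TargetLowering &TLI,
                                       const LoadInst &LI,
                                       const DataLayout &DL, int FI) {
  MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, DL);
  return MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                 Flags, getLLTForType(*LI.getType(), DL),
                                 LI.getAlign());
}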

virtual bool fallBackToDAGISel(const Instruction &Inst) const

virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const

If a physical register, this returns the register that receives the exception address on entry to an ...

const char * getLibcallName(RTLIB::Libcall Call) const

Get the libcall routine name for the specified libcall.

virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const

If a physical register, this returns the register that receives the exception typeid on entry to a la...

virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const

Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...

virtual bool useLoadStackGuardNode(const Module &M) const

If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...

Primary interface to the complete machine description for the target machine.

CodeGenOptLevel getOptLevel() const

Returns the optimization level: None, Less, Default, or Aggressive.

virtual const TargetIntrinsicInfo * getIntrinsicInfo() const

If intrinsic information is available, return it. If not, return null.

const Triple & getTargetTriple() const

unsigned NoTrapAfterNoreturn

Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...

unsigned TrapUnreachable

Emit target-specific trap instruction for 'unreachable' IR instructions.

Target-Independent Code Generator Pass Configuration Options.

virtual std::unique_ptr< CSEConfigBase > getCSEConfig() const

Returns the CSEConfig object to use for the current optimization level.

virtual bool isGISelCSEEnabled() const

Check whether continuous CSE should be enabled in GISel passes.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

virtual const InlineAsmLowering * getInlineAsmLowering() const

virtual const TargetRegisterInfo * getRegisterInfo() const

getRegisterInfo - If register information is available, return it.

virtual const CallLowering * getCallLowering() const

virtual const TargetFrameLowering * getFrameLowering() const

virtual const TargetInstrInfo * getInstrInfo() const

virtual const TargetLowering * getTargetLowering() const

bool isOSWindows() const

Tests whether the OS is Windows.

Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...

The instances of the Type class are immutable: once they are created, they are never changed.

bool isEmptyTy() const

Return true if this type is empty, that is, it has no elements or all of its elements are empty.

TypeID

Definitions of all of the base types for the Type system.

static Type * getVoidTy(LLVMContext &C)

bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const

Return true if it makes sense to take the size of this type.

bool isAggregateType() const

Return true if the type is an aggregate type.

static IntegerType * getInt32Ty(LLVMContext &C)

bool isTokenTy() const

Return true if this is 'token'.

bool isVoidTy() const

Return true if this is 'void'.

Value * getOperand(unsigned i) const

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

bool hasOneUse() const

Return true if there is exactly one use of this value.

const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

LLVMContext & getContext() const

All values hold a context through their type.

int getNumOccurrences() const

constexpr bool isZero() const

const ParentTy * getParent() const

NodeTy * getNextNode()

Get the next node, or nullptr for the list tail.

A raw_ostream that writes to an std::string.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Args[]

Key for Kernel::Metadata::mArgs.

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

@ C

The default llvm calling convention, compatible with C.

bool match(Val *V, const Pattern &P)

specificval_ty m_Specific(const Value *V)

Match if we have a specific specified value.

TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)

Matches ExtractElementInst.

OneUse_match< T > m_OneUse(const T &SubPattern)

auto m_LogicalOr()

Matches L || R where L and R are arbitrary values.

class_match< Value > m_Value()

Match an arbitrary value and ignore it.

auto m_LogicalAnd()

Matches L && R where L and R are arbitrary values.

BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)

Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
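
A sketch of the matchers above: recognize a single-use "not" (xor X, -1 in either operand order) without writing the case analysis by hand.

#include "llvm/IR/PatternMatch.h"

static bool isSingleUseNot(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  llvm::Value *X;
  return match(V, m_OneUse(m_Not(m_Value(X))));
}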

Libcall

RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.

@ Implicit

Not emitted register (e.g. carry, or temporary result).

@ Undef

Value of the register doesn't matter.

Offsets

Offsets in bytes from the start of the input buffer.

SmallVector< SwitchWorkListItem, 4 > SwitchWorkList

std::vector< CaseCluster > CaseClusterVector

void sortAndRangeify(CaseClusterVector &Clusters)

Sort Clusters and merge adjacent cases.

CaseClusterVector::iterator CaseClusterIt

@ CC_Range

A cluster of adjacent case labels with the same destination, or just one case.

@ CC_JumpTable

A cluster of cases suitable for jump table lowering.

@ CC_BitTests

A cluster of cases suitable for bit test lowering.

@ CE

Windows NT (Windows on ARM)

Reg

All possible values of the reg field in the ModR/M byte.

initializer< Ty > init(const Ty &Val)

ExceptionBehavior

Exception behavior used for floating point operations.

@ ebIgnore

This corresponds to "fpexcept.ignore".

DiagnosticInfoOptimizationBase::Argument NV

NodeAddr< PhiNode * > Phi

NodeAddr< CodeNode * > Code

This is an optimization pass for GlobalISel generic memory operations.

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

@ Low

Lower the current thread's priority such that it does not affect foreground tasks significantly.

int popcount(T Value) noexcept

Count the number of set bits in a value.

bool isUIntN(unsigned N, uint64_t x)

Checks if an unsigned integer fits into the given (dynamic) bit width.

detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...

int countr_one(T Value)

Count the number of ones from the least significant bit to the first zero bit.

void diagnoseDontCall(const CallInst &CI)

auto successors(const MachineBasicBlock *BB)

MVT getMVTForLLT(LLT Ty)

Get a rough equivalent of an MVT for a given LLT.

gep_type_iterator gep_type_end(const User *GEP)

MachineBasicBlock::iterator findSplitPointForStackProtector(MachineBasicBlock *BB, const TargetInstrInfo &TII)

Find the split point at which to splice the end of BB into its success stack protector check machine ...

LLT getLLTForMVT(MVT Ty)

Get a rough equivalent of an LLT for a given MVT.

int countr_zero(T Val)

Count number of 0's from the least significant bit to the most stopping at the first 1.
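
A sketch of the bit utilities above (popcount, countr_one, countr_zero): testing whether a value is one contiguous run of set bits.

#include "llvm/ADT/bit.h"
#include <cstdint>

static bool isShiftedMask32(uint32_t V) {
  if (V == 0)
    return false;
  unsigned TZ = llvm::countr_zero(V);         // zeros below the run
  unsigned Ones = llvm::countr_one(V >> TZ);  // length of the run
  return llvm::popcount(V) == static_cast<int>(Ones);
}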

Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to infer an alignment for the specified pointer.

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)

Create a stride shuffle mask.

auto reverse(ContainerTy &&C)

void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)

computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
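
A sketch of the translator-style split this helper performs: one leaf LLT (and offset) per individually legal value, with one vreg allocated per leaf.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"

static unsigned countLeafLLTs(const llvm::DataLayout &DL, llvm::Type &Ty) {
  llvm::SmallVector<llvm::LLT, 4> ValueTys;
  llvm::SmallVector<uint64_t, 4> Offsets;   // offset of each leaf
  llvm::computeValueLLTs(DL, Ty, ValueTys, &Offsets);
  return ValueTys.size();
}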

void sort(IteratorTy Start, IteratorTy End)

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

void report_fatal_error(Error Err, bool gen_crash_diag=true)

Report a serious error, calling any installed error handler.

auto succ_size(const MachineBasicBlock *BB)

EHPersonality classifyEHPersonality(const Value *Pers)

See if the given exception handling personality function is one that we understand.

CodeGenOptLevel

Code generation optimization level.

@ Global

Append to llvm.global_dtors.

@ First

Helpers to iterate all locations in the MemoryEffectsBase class.

void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)

Modify analysis usage so it preserves passes required for the SelectionDAG fallback.

auto lower_bound(R &&Range, T &&Value)

Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...

llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)

Create an interleave shuffle mask.

bool isAsynchronousEHPersonality(EHPersonality Pers)

Returns true if this personality function catches asynchronous exceptions.

OutputIt copy(R &&Range, OutputIt Out)

std::optional< RoundingMode > convertStrToRoundingMode(StringRef)

Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...

gep_type_iterator gep_type_begin(const User *GEP)

GlobalValue * ExtractTypeInfo(Value *V)

ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.

void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)

This method is similar to getUnderlyingObject except that it can look through phi and select instruct...

Align commonAlignment(Align A, uint64_t Offset)

Returns the alignment that satisfies both alignments.

LLT getLLTForType(Type &Ty, const DataLayout &DL)

Construct a low-level type based on an LLVM type.
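
A sketch of the type-mapping helpers above (getLLTForType together with getMVTForLLT); `Ctx` and `DL` are assumed inputs.

#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

static llvm::MVT mvtForInt32(llvm::LLVMContext &Ctx,
                             const llvm::DataLayout &DL) {
  llvm::LLT S32 = llvm::getLLTForType(*llvm::Type::getInt32Ty(Ctx), DL);
  return llvm::getMVTForLLT(S32);   // the rough MVT equivalent (MVT::i32)
}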

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

A collection of metadata nodes that might be associated with a memory access used by the alias-analys...

This struct is a compact representation of a valid (non-zero power of two) alignment.

uint64_t value() const

This is a hole in the type system and should not be abused.

Pair of physical register and lane mask.

This class contains a discriminated union of information about pointers in memory operands,...

static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.

MachineBasicBlock * Parent

This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...

BranchProbability TrueProb

MachineBasicBlock * ThisBB

struct PredInfoPair PredInfo

BranchProbability FalseProb

MachineBasicBlock * TrueBB

MachineBasicBlock * FalseBB