LLVM: lib/CodeGen/GlobalISel/IRTranslator.cpp Source File

//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

// ... (remaining LLVM #includes elided in this listing)
#include "llvm/IR/IntrinsicsAMDGPU.h"
// ...
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
// ... (pass dependency initializers elided)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().setFailedISel();
  bool IsGlobalISelAbortEnabled = TPC.isGlobalISelAbortEnabled();

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsGlobalISelAbortEnabled)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsGlobalISelAbortEnabled)
    report_fatal_error(Twine(R.getMsg()));
  else
    ORE.emit(R);
}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() override = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have no debug loc because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  if (OptLevel != CodeGenOptLevel::None) {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
  }
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<TargetLibraryInfoWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto [MapEntry, Inserted] = FrameIndices.try_emplace(&AI);
  if (!Inserted)
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = MapEntry->second;
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}

Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

static bool containsBF16Type(const User &U) {
  // BF16 cannot currently be represented by LLT, to avoid miscompiles we
  // prevent any instructions using them. FIXME: This can be removed once LLT
  // supports bfloat.
  return U.getType()->getScalarType()->isBFloatTy() ||
         any_of(U.operands(), [](Value *V) {
           return V->getType()->getScalarType()->isBFloatTy();
         });
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  if (containsBF16Type(U))
    return false;

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}
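// Illustrative example (not part of the original source): for scalar IR such
// as
//   %sum = add nsw i32 %a, %b
// translateBinaryOp() emits a single generic instruction, with the wrap flags
// copied from the IR instruction:
//   %sum:_(s32) = nsw G_ADD %a:_(s32), %b:_(s32)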

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (containsBF16Type(U))
    return false;

  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (containsBF16Type(U))
    return false;

  const CmpInst *CI = cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred = CI->getPredicate();
  uint32_t Flags = MachineInstr::copyFlagsFromInstruction(*CI);
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

  return true;
}
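// Illustrative example (not part of the original source): integer and FP
// compares map onto G_ICMP/G_FCMP with the IR predicate encoded as an
// operand:
//   %c = icmp eq i32 %x, %y    -->  %c:_(s1) = G_ICMP intpred(eq), %x(s32), %y
//   %d = fcmp olt float %u, %v -->  %d:_(s1) = G_FCMP floatpred(olt), %u(s32), %v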

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
}

void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into the
  // caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

static bool inBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");

  // Skip over not part of the tree and remember to invert op and operands at
  // next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      inBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it
  // needs to be inverted, e.g.
  //   and (not (or X, Y)), Z  --> and (and (not X) (not Y)) Z
  // and
  //   or  (not (and X, Y)), Z --> or (or (not X) (not Y)) Z
  // BOpc is the opcode of BOp. If we don't have a BOp, BOpc will be 0.
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !inBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !inBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that
    //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}

bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (OptLevel == CodeGenOptLevel::None ||
        !CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    Value *Vec;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode &&
        !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
          match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}
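// Illustrative example (not part of the original source): a conditional IR
// branch
//   br i1 %c, label %then, label %else
// is emitted through emitSwitchCase() as a conditional branch plus an
// unconditional branch to the other successor:
//   G_BRCOND %c(s1), %bb.then
//   G_BR %bb.else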

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set an equal probability for each successor.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();

    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
    // For optimized builds, lower large range as a balanced binary tree.
    if (NumClusters > 3 &&
        MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
      continue;
    }

    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}
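// Illustrative example (not part of the original source): a dense switch such
// as
//   switch i32 %v, label %def [ i32 0, label %a
//                               i32 1, label %b
//                               i32 2, label %c ]
// is clustered by findJumpTables() and typically lowered as a range check in
// the header block followed by an indirect jump-table branch (see
// emitJumpTableHeader() and emitJumpTable() below).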

void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                                 const SwitchCG::SwitchWorkListItem &W,
                                 Value *Cond, MachineBasicBlock *SwitchMBB,
                                 MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);

  // Use the first element on the right as pivot since we will make less-than
  // comparisons against it.
  CaseClusterIt PivotCluster = FirstRight;
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  CaseClusterIt FirstLeft = W.FirstCluster;
  CaseClusterIt LastRight = W.LastCluster;

  const ConstantInt *Pivot = PivotCluster->Low;

  // New blocks will be inserted immediately after the current one.
  MachineFunction::iterator BBI(W.MBB);
  ++BBI;

  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
  // we can branch to its destination directly if it's squeezed exactly in
  // between the known lower bound and Pivot - 1.
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
  } else {
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
  }

  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
  // single cluster, RHS.Low == Pivot, and we can branch to its destination
  // directly if RHS.High equals the current upper bound.
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
  } else {
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
  }

  // Create the CaseBlock record that will be used to lower the branch.
  CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
               LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
               RightProb);

  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);
  else
    SL->SwitchCases.push_back(CB);
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table.
  assert(JT.Reg && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}
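// Illustrative example (not part of the original source): the jump table
// block built above looks roughly like
//   %jt:_(p0) = G_JUMP_TABLE %jump-table.0
//   G_BRJT %jt(p0), %jump-table.0, %idx(s64)
// where %idx is the rebased switch value produced by emitJumpTableHeader().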

bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JT.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.FallthroughUnreachable) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    MIB.buildBr(*JT.MBB);
  return true;
}

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just use the existing condition vreg directly.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
        CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info.
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
    MachineBasicBlock *Fallthrough, bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record CurMBB as a machine predecessor to this successor.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  LLT MaskTy = SwitchOpTy;
  if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
      !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
    MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  else {
    // Ensure that the type will fit the mask value.
    for (const SwitchCG::BitTestCase &Case : B.Cases) {
      if (!isUIntN(SwitchOpTy.getSizeInBits(), Case.Mask)) {
        // Switch table case range are encoded into series of masks.
        // Just use pointer type, it's guaranteed to fit.
        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
        break;
      }
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.FallthroughUnreachable) {
    // Conditionally branch to the default block if the value is out of range.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}

void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}

bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if
    // possible without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
        return false;
      }
      break;
    }

    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    // It's legal for indirectbr instructions to have duplicate blocks in the
    // destination list. We don't allow this in MIR. Skip anything that's
    // already a successor.
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);
  TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
  if (StoreSize.isZero())
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());
  AAMDNodes AAInfo = LI.getAAMetadata();

  const Value *Ptr = LI.getPointerOperand();
  Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg =
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  MachineMemOperand::Flags Flags =
      TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
    if (AA->pointsToConstantMemory(
            MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
      Flags |= MachineMemOperand::MOInvariant;
    }
  }

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(LI);
    auto MMO =
        MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Regs[i]),
                                 commonAlignment(BaseAlign, Offsets[i] / 8),
                                 AAInfo, Ranges, LI.getSyncScopeID(),
                                 LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}
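// Illustrative example (not part of the original source): a load of an
// aggregate is split into one G_LOAD per leaf value, each at its byte offset:
//   %v = load { i32, i32 }, ptr %p
// becomes roughly
//   %v0:_(s32) = G_LOAD %p(p0) :: (load (s32) from %ir.p)
//   %off:_(s64) = G_CONSTANT i64 4
//   %p1:_(p0) = G_PTR_ADD %p, %off(s64)
//   %v1:_(s32) = G_LOAD %p1(p0) :: (load (s32) from %ir.p + 4)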

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Vals[i]),
                                        commonAlignment(BaseAlign,
                                                        Offsets[i] / 8),
                                        SI.getAAMetadata(), nullptr,
                                        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    llvm::append_range(Indices, drop_begin(U.operands()));
  }

  return static_cast<uint64_t>(
      DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}
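// Illustrative example (not part of the original source): because aggregates
// are kept as lists of vregs, extractvalue is register renaming, not code:
//   %x = extractvalue { i32, i64 } %agg, 1
// simply maps %x onto the vreg already holding the second element of %agg; no
// machine instructions are emitted.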

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto *InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  uint32_t Flags = 0;
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*SI);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
  }

  return true;
}
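// Illustrative example (not part of the original source):
//   %r = select i1 %c, i32 %x, i32 %y
// emits one G_SELECT per aggregate element, here just
//   %r:_(s32) = G_SELECT %c(s1), %x, %y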

bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  Register Src = getOrCreateVReg(V);
  auto &Regs = *VMap.getVRegs(U);
  if (Regs.empty()) {
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
  } else {
    // If we already assigned a vreg for this instruction, we can't change
    // that. Emit a copy to satisfy the users we already emitted.
    MIRBuilder.buildCopy(Regs[0], Src);
  }
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    // If the source is a ConstantInt then it was probably created by
    // ConstantHoisting and we should leave it alone.
    if (isa<ConstantInt>(U.getOperand(0)))
      return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
                           MIRBuilder);
    return translateCopy(U, *U.getOperand(0), MIRBuilder);
  }

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  if (containsBF16Type(U))
    return false;

  uint32_t Flags = 0;
  if (const Instruction *I = dyn_cast<Instruction>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*I);

  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op}, Flags);
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  uint32_t PtrAddFlags = 0;
  // The pointer adds generated for the GEP inherit its wrap flags (nuw/nusw).
  if (const Instruction *I = dyn_cast<Instruction>(&U))
    PtrAddFlags = MachineInstr::copyFlagsFromInstruction(*I);

  auto PtrAddFlagsWithConst = [&](int64_t Offset) {
    // A non-negative constant offset added to a nusw GEP cannot wrap
    // unsigned, so the G_PTR_ADD is also nuw.
    if ((PtrAddFlags & MachineInstr::MIFlag::NoUSWrap) && Offset >= 0)
      return PtrAddFlags | MachineInstr::MIFlag::NoUWrap;
    return PtrAddFlags;
  };

  // Normalize vector GEP: all scalar operands should be converted to the
  // splat vector.
  unsigned VectorWidth = 0;

  // True if we should use a splat vector; using VectorWidth alone is not
  // sufficient.
  bool WantSplatVector = false;
  if (auto *VT = dyn_cast<VectorType>(U.getType())) {
    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
    // We don't produce 1 x N vectors; those are treated as scalars.
    WantSplatVector = VectorWidth > 1;
  }

  // We might need to splat the base pointer into a vector if the offsets
  // are vectors.
  if (WantSplatVector && !PtrTy.isVector()) {
    BaseReg = MIRBuilder
                  .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, PtrTy),
                                         BaseReg)
                  .getReg(0);
    PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
    PtrTy = getLLTForType(*PtrIRTy, *DL);
    OffsetIRTy = DL->getIndexType(PtrIRTy);
    OffsetTy = getLLTForType(*OffsetIRTy, *DL);
  }

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = GTI.getSequentialElementStride(*DL);

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
          Offset += ElementSize * *Val;
          continue;
        }
      }

      if (Offset != 0) {
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        BaseReg = MIRBuilder
                      .buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0),
                                   PtrAddFlagsWithConst(Offset))
                      .getReg(0);
        Offset = 0;
      }

      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {
          IdxReg = MIRBuilder
                       .buildSplatBuildVector(OffsetTy.changeElementType(IdxTy),
                                              IdxReg)
                       .getReg(0);
        }

        IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
      }

      // Avoid doing it for ElementSize of 1.
      Register GepOffsetReg;
      if (ElementSize != 1) {
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);

        // The multiplication is NUW if the GEP is nuw, and NSW if the GEP is
        // nusw.
        uint32_t ScaleFlags = PtrAddFlags & MachineInstr::MIFlag::NoUWrap;
        if (PtrAddFlags & MachineInstr::MIFlag::NoUSWrap)
          ScaleFlags |= MachineInstr::MIFlag::NoSWrap;

        GepOffsetReg =
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
                .getReg(0);
      } else {
        GepOffsetReg = IdxReg;
      }

      BaseReg =
          MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)
              .getReg(0);
    }
  }

  if (Offset != 0) {
    auto OffsetMIB = MIRBuilder.buildConstant(OffsetTy, Offset);

    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
                           PtrAddFlagsWithConst(Offset));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
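// Illustrative example (not part of the original source): a GEP is lowered to
// pointer arithmetic; constant offsets are accumulated and emitted as a
// single add:
//   %q = getelementptr { i32, i32 }, ptr %p, i64 1, i32 1
// becomes
//   %off:_(s64) = G_CONSTANT i64 12
//   %q:_(p0) = G_PTR_ADD %p, %off(s64)
// while a variable index requires a G_MUL by the element stride first.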

bool IRTranslator::translateMemFunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned Opcode) {
  const Value *SrcPtr = CI.getArgOperand(1);
  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(SrcPtr))
    return true;

  SmallVector<Register, 3> SrcRegs;

  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isPointer())
      MinPtrSize = std::min(SrcTy.getSizeInBits(), MinPtrSize);
    SrcRegs.push_back(SrcReg);
  }

  LLT SizeTy = LLT::scalar(MinPtrSize);

  // The size operand should be the minimum of the pointer sizes.
  Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
  if (MRI->getType(SizeOpReg) != SizeTy)
    SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);

  auto ICall = MIRBuilder.buildInstr(Opcode);
  for (Register SrcReg : SrcRegs)
    ICall.addUse(SrcReg);

  Align DstAlign;
  Align SrcAlign;
  unsigned IsVol =
      cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();

  ConstantInt *CopySize = nullptr;

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = MSI->getDestAlign().valueOrOne();
  }

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
    // We need to propagate the tail call flag from the IR inst as an argument.
    // Otherwise, we have to pessimize and assume later that we cannot tail
    // call any memory intrinsics.
    ICall.addImm(CI.isTailCall() ? 1 : 0);
  }

  // Create mem operands to store the alignment and volatile info.
  MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
  MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
  if (IsVol) {
    LoadFlags |= MachineMemOperand::MOVolatile;
    StoreFlags |= MachineMemOperand::MOVolatile;
  }

  AAMDNodes AAInfo = CI.getAAMetadata();
  if (AA && CopySize &&
      AA->pointsToConstantMemory(MemoryLocation(
          SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
    LoadFlags |= MachineMemOperand::MOInvariant;

    // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
    // but the previous usage implied it did. Probably should check
    // isDereferenceableAndAlignedPointer.
    LoadFlags |= MachineMemOperand::MODereferenceable;
  }

  ICall.addMemOperand(
      MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));

  return true;
}
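// Illustrative example (not part of the original source):
//   call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 %n, i1 false)
// becomes (the trailing 0 is the tail-call flag added above):
//   G_MEMCPY %d(p0), %s(p0), %n(s64), 0
//       :: (store (s8) into %ir.d), (load (s8) from %ir.s)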

bool IRTranslator::translateTrap(const CallInst &CI,
                                 MachineIRBuilder &MIRBuilder,
                                 unsigned Opcode) {
  StringRef TrapFuncName =
      CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
  if (TrapFuncName.empty()) {
    if (Opcode == TargetOpcode::G_UBSANTRAP) {
      uint64_t Code = cast<ConstantInt>(CI.getOperand(0))->getZExtValue();
      MIRBuilder.buildInstr(Opcode, {}, ArrayRef<llvm::SrcOp>()).addImm(Code);
    } else {
      MIRBuilder.buildInstr(Opcode);
    }
    return true;
  }

  CallLowering::CallLoweringInfo Info;
  if (Opcode == TargetOpcode::G_UBSANTRAP)
    Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
                             CI.getArgOperand(0)->getType(), 0});

  Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
  Info.CB = &CI;
  Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
  return CLI->lowerCall(MIRBuilder, Info);
}

bool IRTranslator::translateVectorInterleave2Intrinsic(
    const CallInst &CI, MachineIRBuilder &MIRBuilder) {
  assert(CI.getIntrinsicID() == Intrinsic::vector_interleave2 &&
         "This function can only be called on the interleave2 intrinsic!");
  // Canonicalize interleave2 to G_SHUFFLE_VECTOR (similar to SelectionDAG).
  Register Op0 = getOrCreateVReg(*CI.getOperand(0));
  Register Op1 = getOrCreateVReg(*CI.getOperand(1));
  Register Res = getOrCreateVReg(CI);

  LLT OpTy = MRI->getType(Op0);
  MIRBuilder.buildShuffleVector(Res, Op0, Op1,
                                createInterleaveMask(OpTy.getNumElements(), 2));

  return true;
}

bool IRTranslator::translateVectorDeinterleave2Intrinsic(
    const CallInst &CI, MachineIRBuilder &MIRBuilder) {
  assert(CI.getIntrinsicID() == Intrinsic::vector_deinterleave2 &&
         "This function can only be called on the deinterleave2 intrinsic!");
  // Canonicalize deinterleave2 to shuffles that extract sub-vectors (similar
  // to SelectionDAG).
  Register Op = getOrCreateVReg(*CI.getOperand(0));
  auto Res = getOrCreateVRegs(CI);

  LLT ResTy = MRI->getType(Res[0]);
  MIRBuilder.buildShuffleVector(Res[0], Op, Op,
                                createStrideMask(0, 2, ResTy.getNumElements()));
  MIRBuilder.buildShuffleVector(Res[1], Op, Op,
                                createStrideMask(1, 2, ResTy.getNumElements()));

  return true;
}

void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global) {
    LLVMContext &Ctx = MIRBuilder.getContext();
    Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));
    MIRBuilder.buildUndef(DstReg);
    return;
  }

  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass());
  auto MIB =
      MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(
      Op, {ResRegs[0], ResRegs[1]},
      {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});

  return true;
}
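// Illustrative example (not part of the original source): the overflow
// intrinsics return two results, which map onto a two-def generic op:
//   %p = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// becomes
//   %val:_(s32), %ovf:_(s1) = G_UADDO %a, %b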

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                                MachineIRBuilder &MIRBuilder) {
  Register Dst = getOrCreateVReg(CI);
  Register Src0 = getOrCreateVReg(*CI.getOperand(0));
  Register Src1 = getOrCreateVReg(*CI.getOperand(1));
  uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
  return true;
}

unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::acos:
    return TargetOpcode::G_FACOS;
  case Intrinsic::asin:
    return TargetOpcode::G_FASIN;
  case Intrinsic::atan:
    return TargetOpcode::G_FATAN;
  case Intrinsic::atan2:
    return TargetOpcode::G_FATAN2;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::cosh:
    return TargetOpcode::G_FCOSH;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::exp10:
    return TargetOpcode::G_FEXP10;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::minimumnum:
    return TargetOpcode::G_FMINIMUMNUM;
  case Intrinsic::maximumnum:
    return TargetOpcode::G_FMAXIMUMNUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::ldexp:
    return TargetOpcode::G_FLDEXP;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sinh:
    return TargetOpcode::G_FSINH;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::tan:
    return TargetOpcode::G_FTAN;
  case Intrinsic::tanh:
    return TargetOpcode::G_FTANH;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::readsteadycounter:
    return TargetOpcode::G_READSTEADYCOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  case Intrinsic::llrint:
    return TargetOpcode::G_INTRINSIC_LLRINT;
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_fminimum:
    return TargetOpcode::G_VECREDUCE_FMINIMUM;
  case Intrinsic::vector_reduce_fmaximum:
    return TargetOpcode::G_VECREDUCE_FMAXIMUM;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::experimental_vector_compress:
    return TargetOpcode::G_VECTOR_COMPRESS;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
  case Intrinsic::get_fpenv:
    return TargetOpcode::G_GET_FPENV;
  case Intrinsic::get_fpmode:
    return TargetOpcode::G_GET_FPMODE;
  }
  return Intrinsic::not_intrinsic;
}

bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {
  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (const auto &Arg : CI.args())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}
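// Illustrative example (not part of the original source): a "simple"
// intrinsic is a 1:1 opcode mapping with fast-math flags carried over, e.g.
//   %r = call nnan float @llvm.sqrt.f32(float %x)
// becomes
//   %r:_(s32) = nnan G_FSQRT %x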

// TODO: Include ConstrainedOps.def when all strict instructions are defined.
static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  case Intrinsic::experimental_constrained_ldexp:
    return TargetOpcode::G_STRICT_FLDEXP;
  default:
    return 0;
  }
}

bool IRTranslator::translateConstrainedFPIntrinsic(
    const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
  fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();

  unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
  if (!Opcode)
    return false;

  uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
  if (EB == fp::ExceptionBehavior::ebIgnore)
    Flags |= MachineInstr::NoFPExcept;

  SmallVector<llvm::SrcOp, 4> VRegs;
  for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(I)));

  MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
  return true;
}

std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
  auto VRegs = getOrCreateVRegs(Arg);
  if (VRegs.size() != 1)
    return std::nullopt;

  // Arguments are lowered as a copy of a livein physical register.
  auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
  if (!VRegDef || !VRegDef->isCopy())
    return std::nullopt;
  return VRegDef->getOperand(1).getReg().asMCReg();
}

bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
                                                 const DILocalVariable *Var,
                                                 const DIExpression *Expr,
                                                 const DebugLoc &DL,
                                                 MachineIRBuilder &MIRBuilder) {
  auto *Arg = dyn_cast<Argument>(Val);
  if (!Arg)
    return false;

  if (!Expr->isEntryValue())
    return false;

  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
  if (!PhysReg) {
    LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
                      << ": expression is entry_value but "
                      << "couldn't find a physical register\n");
    return true;
  }

  if (isDeclare) {
    // Record the entry-value location in the MachineFunction's variable table.
    MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
  } else {
    MIRBuilder.buildDirectDbgValue(*PhysReg, Var, Expr);
  }

  return true;
}

static unsigned getConvOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    llvm_unreachable("Unexpected intrinsic");
  case Intrinsic::experimental_convergence_anchor:
    return TargetOpcode::CONVERGENCECTRL_ANCHOR;
  case Intrinsic::experimental_convergence_entry:
    return TargetOpcode::CONVERGENCECTRL_ENTRY;
  case Intrinsic::experimental_convergence_loop:
    return TargetOpcode::CONVERGENCECTRL_LOOP;
  }
}

bool IRTranslator::translateConvergenceControlIntrinsic(
    const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
  MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvOpcode(ID));
  Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
  MIB.addDef(OutputReg);

  if (ID == Intrinsic::experimental_convergence_loop) {
    auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
    assert(Bundle && "Expected a convergence control token.");
    Register InputReg =
        getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
    MIB.addUse(InputReg);
  }

  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
                                           Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(MI);
      }
    }
  }

  // If this is a simple intrinsic (that is, we just need to add a def of
  // a vreg, and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None ||
        MF->getFunction().hasOptNone())
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    const AllocaInst *AI = cast<AllocaInst>(CI.getArgOperand(0));
    if (!AI->isStaticAlloca())
      return true;

    MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    return true;
  }
  case Intrinsic::fake_use: {
    SmallVector<llvm::SrcOp, 4> VRegs;
    for (const auto &Arg : CI.args())
      llvm::append_range(VRegs, getOrCreateVRegs(*Arg));
    MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
    MF->setHasFakeUses(true);
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");
    translateDbgDeclareRecord(DI.getAddress(), DI.hasArgList(),
                              DI.getVariable(), DI.getExpression(),
                              DI.getDebugLoc(), MIRBuilder);
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
    Align Alignment = getKnownAlignment(Ptr, *DL);

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                MachineMemOperand::MOStore,
                                                ListSize, Alignment));
    return true;
  }
  case Intrinsic::dbg_assign:
    // A dbg.assign is a dbg.value with more information about stack
    // locations, typically produced during optimisation of variables with
    // leaked addresses. We can treat it like a normal dbg_value intrinsic
    // here; if we didn't, the variable's location would be dropped.
    [[fallthrough]];
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    translateDbgValueRecord(DI.getValue(), DI.hasArgList(), DI.getVariable(),
                            DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    // TODO: Preserve "int min is poison" arg in GMIR?
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI->isFMAFasterThanFMulAndFAdd(*MF,
                                        TLI->getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
                          MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildFMul(
          Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildFAdd(Dst, FMul, Op2,
                           MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::convert_from_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPExt(getOrCreateVReg(CI),
                          getOrCreateVReg(*CI.getArgOperand(0)),
                          MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::convert_to_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
                            getOrCreateVReg(*CI.getArgOperand(0)),
                            MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::frexp: {
    ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
    MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],
                           getOrCreateVReg(*CI.getArgOperand(0)),
                           MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  }
  case Intrinsic::modf: {
    ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
    MIRBuilder.buildModf(VRegs[0], VRegs[1],
                         getOrCreateVReg(*CI.getArgOperand(0)),
                         MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  }
  case Intrinsic::sincos: {
    ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
    MIRBuilder.buildFSincos(VRegs[0], VRegs[1],
                            getOrCreateVReg(*CI.getArgOperand(0)),
                            MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  }
  case Intrinsic::fptosi_sat:
    MIRBuilder.buildFPTOSI_SAT(getOrCreateVReg(CI),
                               getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fptoui_sat:
    MIRBuilder.buildFPTOUI_SAT(getOrCreateVReg(CI),
                               getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(getOrCreateVReg(CI), TypeID);
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register GuardVal;
    if (TLI->useLoadStackGuardNode(*CI.getModule())) {
      GuardVal = MRI->createGenericVirtualRegister(PtrTy);
      getStackGuard(GuardVal, MIRBuilder);
    } else
      GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy, Align(8)));
    return true;
  }
  case Intrinsic::stacksave: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
    return true;
  }
  case Intrinsic::stackrestore: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
  case Intrinsic::invariant_start: {
    MIRBuilder.buildUndef(getOrCreateVReg(CI));
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::expect:
  case Intrinsic::expect_with_probability:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    // Drop the intrinsic, but forward the value.
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return true;
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
    return true;
  }
  case Intrinsic::write_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  }
  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());

    // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment
    // emission is the same on all targets.
    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
      Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
      if (isa<ConstantPointerNull>(Arg))
        continue; // Skip null pointers. They represent a hole in index space.

      int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
      MCSymbol *FrameAllocSym =
          MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);

      // This should be inserted at the start of the entry block.
      auto LocalEscape =
          MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
              .addSym(FrameAllocSym)
              .addFrameIndex(FI);

      EntryMBB.insert(EntryMBB.begin(), LocalEscape);
    }

    return true;
  }
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    // Need to check for the reassoc flag to decide whether we want a
    // sequential reduction opcode or not.
    Register Dst = getOrCreateVReg(CI);
    Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
    Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Opc = 0;
    if (!CI.hasAllowReassoc()) {
      // The sequential ordering case.
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      if (!MRI->getType(VecSrc).isVector())
        Opc = ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
                                                  : TargetOpcode::G_FMUL;
      MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
                            MachineInstr::copyFlagsFromInstruction(CI));
      return true;
    }
    // We split the operation into a separate G_FADD/G_FMUL + the reduce,
    // since the associativity doesn't matter.
    unsigned ScalarOpc;
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
    } else {
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    }
    LLT DstTy = MRI->getType(Dst);
    auto Rdx = MIRBuilder.buildInstr(
        Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
                          MachineInstr::copyFlagsFromInstruction(CI));

    return true;
  }
  case Intrinsic::trap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
  case Intrinsic::debugtrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
  case Intrinsic::ubsantrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check:
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
                         getOrCreateVReg(*ConstantInt::getTrue(CI.getType())));
    return true;
  case Intrinsic::amdgcn_cs_chain:
  case Intrinsic::amdgcn_call_whole_wave:
    return translateCallBase(CI, MIRBuilder);
  case Intrinsic::fptrunc_round: {
    uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);

    // Convert the metadata argument to a constant integer.
    Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
    std::optional<RoundingMode> RoundMode =
        convertStrToRoundingMode(cast<MDString>(MD)->getString());

    // Add the rounding mode as an integer.
    MIRBuilder
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
        .addImm((int)*RoundMode);

    return true;
  }
  case Intrinsic::is_fpclass: {
    Value *FpValue = CI.getOperand(0);
    ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));

    MIRBuilder
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})
        .addImm(TestMaskValue->getZExtValue());

    return true;
  }
  case Intrinsic::set_fpenv: {
    Value *FPEnv = CI.getOperand(0);
    MIRBuilder.buildSetFPEnv(getOrCreateVReg(*FPEnv));
    return true;
  }
  case Intrinsic::reset_fpenv:
    MIRBuilder.buildResetFPEnv();
    return true;
  case Intrinsic::set_fpmode: {
    Value *FPState = CI.getOperand(0);
    MIRBuilder.buildSetFPMode(getOrCreateVReg(*FPState));
    return true;
  }
  case Intrinsic::reset_fpmode:
    MIRBuilder.buildResetFPMode();
    return true;
  case Intrinsic::get_rounding:
    MIRBuilder.buildGetRounding(getOrCreateVReg(CI));
    return true;
  case Intrinsic::set_rounding:
    MIRBuilder.buildSetRounding(getOrCreateVReg(*CI.getOperand(0)));
    return true;
  case Intrinsic::vscale: {
    MIRBuilder.buildVScale(getOrCreateVReg(CI), 1);
    return true;
  }
  case Intrinsic::scmp:
    MIRBuilder.buildSCmp(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getOperand(0)),
                         getOrCreateVReg(*CI.getOperand(1)));
    return true;
  case Intrinsic::ucmp:
    MIRBuilder.buildUCmp(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getOperand(0)),
                         getOrCreateVReg(*CI.getOperand(1)));
    return true;
  case Intrinsic::vector_extract:
    return translateExtractVector(CI, MIRBuilder);
  case Intrinsic::vector_insert:
    return translateInsertVector(CI, MIRBuilder);
  case Intrinsic::stepvector: {
    MIRBuilder.buildStepVector(getOrCreateVReg(CI), 1);
    return true;
  }
  case Intrinsic::prefetch: {
    Value *Addr = CI.getOperand(0);
    unsigned RW = cast<ConstantInt>(CI.getOperand(1))->getZExtValue();
    unsigned Locality = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
    unsigned CacheType = cast<ConstantInt>(CI.getOperand(3))->getZExtValue();

    auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
    auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
                                          LLT(), Align());

    MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
                             MMO);

    return true;
  }

  case Intrinsic::vector_interleave2:
  case Intrinsic::vector_deinterleave2: {
    // Both intrinsics have at least one operand.
    Value *Op0 = CI.getOperand(0);
    LLT ResTy = getLLTForType(*Op0->getType(), MIRBuilder.getDataLayout());
    if (!ResTy.isFixedVector())
      return false;

    if (CI.getIntrinsicID() == Intrinsic::vector_interleave2)
      return translateVectorInterleave2Intrinsic(CI, MIRBuilder);

    return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
  }

#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
                                           MIRBuilder);
  case Intrinsic::experimental_convergence_anchor:
  case Intrinsic::experimental_convergence_entry:
  case Intrinsic::experimental_convergence_loop:
    return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
  case Intrinsic::reloc_none: {
    Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(0))->getMetadata();
    StringRef SymbolName = cast<MDString>(MD)->getString();
    MIRBuilder.buildInstr(TargetOpcode::RELOC_NONE)
        .addExternalSymbol(SymbolName.data());
    return true;
  }
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                      MachineIRBuilder &MIRBuilder) {
  if (containsBF16Type(CB))
    return false;

  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();

  if (!ALI) {
    LLVM_DEBUG(
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
    return false;
  }

  return ALI->lowerInlineAsm(
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
}

bool IRTranslator::translateCallBase(const CallBase &CB,
                                     MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> Res = getOrCreateVRegs(CB);

  SmallVector<ArrayRef<Register>, 8> Args;
  Register SwiftInVReg = 0;
  Register SwiftErrorVReg = 0;
  for (const auto &Arg : CB.args()) {
    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      LLT Ty = getLLTForType(*Arg->getType(), *DL);
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      Args.emplace_back(ArrayRef(SwiftInVReg));
      SwiftErrorVReg =
          SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
      continue;
    }
    Args.push_back(getOrCreateVRegs(*Arg));
  }

  if (auto *CI = dyn_cast<CallInst>(&CB)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(CI);
      }
    }
  }

  std::optional<CallLowering::PtrAuthInfo> PAI;
  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_ptrauth)) {
    // Functions should never be ptrauth-called directly.
    assert(!CB.getCalledFunction() && "invalid direct ptrauth call");

    const Value *Key = Bundle->Inputs[0];
    const Value *Discriminator = Bundle->Inputs[1];

    // Look through ptrauth constants to try to eliminate the matching bundle
    // and turn this into a direct call with no ptrauth.
    // CallLowering will use the raw pointer if it doesn't find the PAI.
    const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CB.getCalledOperand());
    if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
        !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
      // If we can't make it direct, package the bundle into PAI.
      Register DiscReg = getOrCreateVReg(*Discriminator);
      PAI = CallLowering::PtrAuthInfo{cast<ConstantInt>(Key)->getZExtValue(),
                                      DiscReg};
    }
  }

  Register ConvergenceCtrlToken = 0;
  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
    const auto &Token = *Bundle->Inputs[0].get();
    ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
  }

  // We don't set HasCalls on MFI here yet because call lowering may decide to
  // optimize into tail calls. Instead, we defer that to selection where a
  // final scan is done to check if any instructions are calls.
  bool Success = CLI->lowerCall(
      MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
      [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });

  // Check if we just inserted a tail call.
  if (Success) {
    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
    HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
  }

  return Success;
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  if (containsBF16Type(U))
    return false;

  const CallInst &CI = cast<CallInst>(U);
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls and calls through
  // weak symbols.
  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return false;

  // FIXME: support control flow guard targets.
  if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  // FIXME: support statepoints and related.
  if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = F ? F->getIntrinsicID() : Intrinsic::not_intrinsic;
  if (!F || ID == Intrinsic::not_intrinsic) {
    if (translateCallBase(CI, MIRBuilder)) {
      diagnoseDontCall(CI);
      return true;
    }
    return false;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  TargetLowering::IntrinsicInfo Info;
  bool IsTgtMemIntrinsic = TLI->getTgtMemIntrinsic(Info, CI, *MF, ID);

  return translateIntrinsic(CI, ID, MIRBuilder,
                            IsTgtMemIntrinsic ? &Info : nullptr);
}

2839

2840

2841

2842

2843bool IRTranslator::translateIntrinsic(

2844 const CallBase &CB, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder,

2845 const TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo) {

2846 ArrayRef<Register> ResultRegs;

2847 if (!CB.getType()->isVoidTy())

2848 ResultRegs = getOrCreateVRegs(CB);

2849

2850

2851

2852 MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);

2855

2856 for (const auto &Arg : enumerate(CB.args())) {

2857 // If this is required to be an immediate, don't materialize it in a

2858 // register.

2859 if (CB.paramHasAttr(Arg.index(), Attribute::ImmArg)) {

2861

2862

2863 assert(CI->getBitWidth() <= 64 &&

2864 "large intrinsic immediates not handled");

2865 MIB.addImm(CI->getSExtValue());

2866 } else {

2868 }

2870 auto *MD = MDVal->getMetadata();

2872 if (!MDN) {

2874 MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);

2875 else

2876 return false;

2877 }

2879 } else {

2881 if (VRegs.size() > 1)

2882 return false;

2883 MIB.addUse(VRegs[0]);

2884 }

2885 }

2886

2887

2888 if (TgtMemIntrinsicInfo) {

2890

2891 Align Alignment = TgtMemIntrinsicInfo->align.value_or(DL->getABITypeAlign(

2893 LLT MemTy =

2896 : LLT::scalar(TgtMemIntrinsicInfo->memVT.getStoreSizeInBits());

2897

2898

2899

2900 MachinePointerInfo MPI;

2901 if (TgtMemIntrinsicInfo->ptrVal) {

2902 MPI = MachinePointerInfo(TgtMemIntrinsicInfo->ptrVal,

2903 TgtMemIntrinsicInfo->offset);

2905 MPI = MachinePointerInfo(*TgtMemIntrinsicInfo->fallbackAddressSpace);

2906 }

2908 MPI, TgtMemIntrinsicInfo->flags, MemTy, Alignment, CB.getAAMetadata(),

2909 nullptr, TgtMemIntrinsicInfo->ssid,

2910 TgtMemIntrinsicInfo->order, TgtMemIntrinsicInfo->failureOrder));

2911 }

2912

2915 auto *Token = Bundle->Inputs[0].get();

2916 Register TokenReg = getOrCreateVReg(*Token);

2918 }

2919 }

2920

2923

2924 return true;

2925}
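// Illustration (not from the source): operands declared ImmArg in
// Intrinsics.td must stay immediates, so the loop above emits them with
// MIB.addImm() instead of a vreg use; for example the i1 "is zero poison"
// flag of llvm.ctlz is such an ImmArg operand.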

2926

2927bool IRTranslator::findUnwindDestinations(

2930 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>

2931 &UnwindDests) {

2938

2939 if (IsWasmCXX) {

2940

2941 return false;

2942 }

2943

2944 while (EHPadBB) {

2948

2949 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);

2950 break;

2951 }

2953

2954

2955 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);

2956 UnwindDests.back().first->setIsEHScopeEntry();

2957 UnwindDests.back().first->setIsEHFuncletEntry();

2958 break;

2959 }

2961

2962 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {

2963 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);

2964

2965 if (IsMSVCCXX || IsCoreCLR)

2966 UnwindDests.back().first->setIsEHFuncletEntry();

2967 if (!IsSEH)

2968 UnwindDests.back().first->setIsEHScopeEntry();

2969 }

2970 NewEHPadBB = CatchSwitch->getUnwindDest();

2971 } else {

2972 continue;

2973 }

2974

2975 BranchProbabilityInfo *BPI = FuncInfo.BPI;

2976 if (BPI && NewEHPadBB)

2978 EHPadBB = NewEHPadBB;

2979 }

2980 return true;

2981}

2982

2983bool IRTranslator::translateInvoke(const User &U,

2986 MCContext &Context = MF->getContext();

2987

2988 const BasicBlock *ReturnBB = I.getSuccessor(0);

2989 const BasicBlock *EHPadBB = I.getSuccessor(1);

2990

2991 const Function *Fn = I.getCalledFunction();

2992

2993

2995 return false;

2996

2997

2998 if (I.hasDeoptState())

2999 return false;

3000

3001

3003 return false;

3004

3005

3007 return false;

3008

3009

3010

3012 (MF->getTarget().getTargetTriple().isOSWindows() &&

3014 return false;

3015

3016 bool LowerInlineAsm = I.isInlineAsm();

3017 bool NeedEHLabel = true;

3018

3019

3020

3021 MCSymbol *BeginSymbol = nullptr;

3022 if (NeedEHLabel) {

3023 MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);

3024 BeginSymbol = Context.createTempSymbol();

3025 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

3026 }

3027

3028 if (LowerInlineAsm) {

3029 if (!translateInlineAsm(I, MIRBuilder))

3030 return false;

3031 } else if (!translateCallBase(I, MIRBuilder))

3032 return false;

3033

3034 MCSymbol *EndSymbol = nullptr;

3035 if (NeedEHLabel) {

3036 EndSymbol = Context.createTempSymbol();

3037 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

3038 }

3039

3041 BranchProbabilityInfo *BPI = FuncInfo.BPI;

3042 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();

3043 BranchProbability EHPadBBProb =

3046

3047 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))

3048 return false;

3049

3050 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),

3051 &ReturnMBB = getMBB(*ReturnBB);

3052

3053 addSuccessorWithProb(InvokeMBB, &ReturnMBB);

3054 for (auto &UnwindDest : UnwindDests) {

3055 UnwindDest.first->setIsEHPad();

3056 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);

3057 }

3059

3060 if (NeedEHLabel) {

3061 assert(BeginSymbol && "Expected a begin symbol!");

3062 assert(EndSymbol && "Expected an end symbol!");

3063 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);

3064 }

3065

3066 MIRBuilder.buildBr(ReturnMBB);

3067 return true;

3068}
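// Illustration (not from the source): the EH_LABEL pair built above brackets
// the lowered call, and MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol)
// records the range so exception-table emission can map it to the landing
// pad.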

3069

3070

3071

3072bool IRTranslator::translateCallBr(const User &U,

3075 return false;

3076

3077 const CallBrInst &I = cast<CallBrInst>(U);

3078 MachineBasicBlock *CallBrMBB = &MIRBuilder.getMBB();

3079

3080 Intrinsic::ID IID = I.getIntrinsicID();

3081 if (I.isInlineAsm()) {

3082

3083

3084

3085 return false;

3086 }

3087 if (!translateIntrinsic(I, IID, MIRBuilder))

3088 return false;

3089

3090

3091 SmallPtrSet<BasicBlock *, 8> Dests = {I.getDefaultDest()};

3092 MachineBasicBlock *Return = &getMBB(*I.getDefaultDest());

3093

3094

3096

3097

3098

3099

3100

3101 for (BasicBlock *Dest : I.getIndirectDests()) {

3102 MachineBasicBlock &Target = getMBB(*Dest);

3103 Target.setIsInlineAsmBrIndirectTarget();

3104 Target.setLabelMustBeEmitted();

3105

3106 if (Dests.insert(Dest).second)

3108 }

3109

3111

3112

3113 MIRBuilder.buildBr(*Return);

3114

3115 return true;

3116}

3117

3118bool IRTranslator::translateLandingPad(const User &U,

3121

3122 MachineBasicBlock &MBB = MIRBuilder.getMBB();

3123

3125

3126

3127

3128 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();

3129 if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&

3130 TLI->getExceptionSelectorRegister(PersonalityFn) == 0)

3131 return true;

3132

3133

3134

3135

3136

3138 return true;

3139

3140

3141

3142 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)

3143 .addSym(MF->addLandingPad(&MBB));

3144

3145

3146

3147 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();

3148 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))

3149 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);

3150

3152 Register Undef = MRI->createGenericVirtualRegister(Ty);

3154

3158 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

3159

3160

3161 Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);

3162 if (!ExceptionReg)

3163 return false;

3164

3167 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

3168

3169 Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);

3170 if (!SelectorReg)

3171 return false;

3172

3174 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);

3175 MIRBuilder.buildCopy(PtrVReg, SelectorReg);

3176 MIRBuilder.buildCast(ResRegs[1], PtrVReg);

3177

3178 return true;

3179}

3180

3181bool IRTranslator::translateAlloca(const User &U,

3184

3186 return true;

3187

3189 Register Res = getOrCreateVReg(AI);

3190 int FI = getOrCreateFrameIndex(AI);

3192 return true;

3193 }

3194

3195

3196 if (MF->getTarget().getTargetTriple().isOSWindows())

3197 return false;

3198

3199

3201 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());

3203 if (MRI->getType(NumElts) != IntPtrTy) {

3204 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);

3206 NumElts = ExtElts;

3207 }

3208

3210

3211 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);

3213 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));

3214 MIRBuilder.buildMul(AllocSize, NumElts, TySize);

3215

3216

3217

3218

3219 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();

3220 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);

3221 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,

3223 auto AlignCst =

3224 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));

3225 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

3226

3227 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));

3228 if (Alignment <= StackAlign)

3229 Alignment = Align(1);

3230 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);

3231

3232 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);

3233 assert(MF->getFrameInfo().hasVarSizedObjects());

3234 return true;

3235}
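// Illustration (not from the source): the G_ADD/G_AND pair above rounds the
// dynamic allocation size up to the stack alignment. A minimal standalone
// sketch of that arithmetic, assuming a power-of-two alignment:
#include <cassert>
#include <cstdint>

uint64_t roundUpToStackAlign(uint64_t AllocSize, uint64_t StackAlign) {
  assert(StackAlign != 0 && (StackAlign & (StackAlign - 1)) == 0 &&
         "stack alignment must be a power of two");
  // Matches SAMinusOne (add align-1) and AlignCst (mask off low bits) above.
  return (AllocSize + StackAlign - 1) & ~(StackAlign - 1);
}
// e.g. roundUpToStackAlign(13, 16) == 16.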

3236

3237bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {

3238

3239

3240

3241

3242 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},

3243 {getOrCreateVReg(*U.getOperand(0)),

3244 DL->getABITypeAlign(U.getType()).value()});

3245 return true;

3246}

3247

3248bool IRTranslator::translateUnreachable(const User &U,

3251 if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,

3252 MF->getTarget().Options.NoTrapAfterNoreturn))

3253 return true;

3254

3255 MIRBuilder.buildTrap();

3256 return true;

3256 return true;

3257}

3258

3259bool IRTranslator::translateInsertElement(const User &U,

3261

3262

3264 FVT && FVT->getNumElements() == 1)

3265 return translateCopy(U, *U.getOperand(1), MIRBuilder);

3266

3267 Register Res = getOrCreateVReg(U);

3268 Register Val = getOrCreateVReg(*U.getOperand(0));

3269 Register Elt = getOrCreateVReg(*U.getOperand(1));

3270 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);

3271 Register Idx;

3272 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(2))) {

3273 if (CI->getBitWidth() != PreferredVecIdxWidth) {

3274 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);

3275 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);

3276 Idx = getOrCreateVReg(*NewIdxCI);

3277 }

3278 }

3279 if (!Idx)

3280 Idx = getOrCreateVReg(*U.getOperand(2));

3281 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {

3282 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);

3283 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);

3284 }

3285 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);

3286 return true;

3287}
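// Illustration (not from the source): APInt::zextOrTrunc above normalizes a
// constant index to the preferred vector-index width. A standalone sketch of
// the same semantics for widths up to 64 bits:
#include <cstdint>

uint64_t zextOrTrunc64(uint64_t Val, unsigned ToBits) {
  // Zero-extension leaves the value unchanged; truncation keeps the low bits.
  return ToBits >= 64 ? Val : (Val & ((uint64_t(1) << ToBits) - 1));
}
// e.g. zextOrTrunc64(0x1FF, 8) == 0xFF, like an i9 index narrowed to i8.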

3288

3289bool IRTranslator::translateInsertVector(const User &U,

3291 Register Dst = getOrCreateVReg(U);

3292 Register Vec = getOrCreateVReg(*U.getOperand(0));

3293 Register Elt = getOrCreateVReg(*U.getOperand(1));

3294

3296 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);

3297

3298

3299 if (CI->getBitWidth() != PreferredVecIdxWidth) {

3301 CI = ConstantInt::get(CI->getContext(), NewIdx);

3302 }

3303

3304

3306 ResultType && ResultType->getNumElements() == 1) {

3308 InputType && InputType->getNumElements() == 1) {

3309

3310

3311

3312 return translateCopy(U, *U.getOperand(0), MIRBuilder);

3313 }

3315

3316

3317

3318 Register Idx = getOrCreateVReg(*CI);

3320 return true;

3321 }

3323

3324

3325 LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);

3326 Register Idx = getOrCreateVReg(*CI);

3327 auto ScaledIndex = MIRBuilder.buildMul(

3328 VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);

3330 return true;

3331 }

3332 }

3333

3335 getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),

3336 getOrCreateVReg(*U.getOperand(1)), CI->getZExtValue());

3337 return true;

3338}

3339

3340bool IRTranslator::translateExtractElement(const User &U,

3342

3343

3344 if (const FixedVectorType *FVT =

3346 if (FVT->getNumElements() == 1)

3347 return translateCopy(U, *U.getOperand(0), MIRBuilder);

3348

3349 Register Res = getOrCreateVReg(U);

3350 Register Val = getOrCreateVReg(*U.getOperand(0));

3351 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);

3352 Register Idx;

3353 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {

3354 if (CI->getBitWidth() != PreferredVecIdxWidth) {

3355 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);

3356 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);

3357 Idx = getOrCreateVReg(*NewIdxCI);

3358 }

3359 }

3360 if (!Idx)

3361 Idx = getOrCreateVReg(*U.getOperand(1));

3362 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {

3363 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);

3364 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);

3365 }

3366 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);

3367 return true;

3368}

3369

3370bool IRTranslator::translateExtractVector(const User &U,

3372 Register Res = getOrCreateVReg(U);

3373 Register Vec = getOrCreateVReg(*U.getOperand(0));

3375 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);

3376

3377

3378 if (CI->getBitWidth() != PreferredVecIdxWidth) {

3380 CI = ConstantInt::get(CI->getContext(), NewIdx);

3381 }

3382

3383

3385 ResultType && ResultType->getNumElements() == 1) {

3387 InputType && InputType->getNumElements() == 1) {

3388

3389

3390 return translateCopy(U, *U.getOperand(0), MIRBuilder);

3391 }

3393

3394

3395

3396 Register Idx = getOrCreateVReg(*CI);

3398 return true;

3399 }

3401

3402

3403 LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);

3404 Register Idx = getOrCreateVReg(*CI);

3405 auto ScaledIndex = MIRBuilder.buildMul(

3406 VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);

3408 return true;

3409 }

3410 }

3411

3413 getOrCreateVReg(*U.getOperand(0)),

3415 return true;

3416}

3417

3418bool IRTranslator::translateShuffleVector(const User &U,

3420

3421

3422

3423

3424 if (U.getOperand(0)->getType()->isScalableTy()) {

3425 Register Val = getOrCreateVReg(*U.getOperand(0));

3427 MRI->getType(Val).getElementType(), Val, 0);

3429 return true;

3430 }

3431

3432 ArrayRef<int> Mask;

3433 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))

3434 Mask = SVI->getShuffleMask();

3435 else

3436 Mask = cast<ConstantExpr>(U).getShuffleMask();

3437

3438

3439

3440

3442 unsigned SrcElts =

3444 if (DstElts == 1) {

3445 unsigned M = Mask[0];

3446 if (SrcElts == 1) {

3447 if (M == 0 || M == 1)

3448 return translateCopy(U, *U.getOperand(M), MIRBuilder);

3449 MIRBuilder.buildUndef(getOrCreateVReg(U));

3450 } else {

3451 Register Dst = getOrCreateVReg(U);

3452 if (M < SrcElts) {

3454 Dst, getOrCreateVReg(*U.getOperand(0)), M);

3455 } else if (M < SrcElts * 2) {

3457 Dst, getOrCreateVReg(*U.getOperand(1)), M - SrcElts);

3458 } else {

3460 }

3461 }

3462 return true;

3463 }

3464

3465

3466 if (SrcElts == 1) {

3469 for (int M : Mask) {

3470 LLT SrcTy = getLLTForType(*U.getOperand(0)->getType(), *DL);

3471 if (M == 0 || M == 1) {

3472 Ops.push_back(getOrCreateVReg(*U.getOperand(M)));

3473 } else {

3474 if (Undef.isValid()) {

3475 Undef = MRI->createGenericVirtualRegister(SrcTy);

3477 }

3478 Ops.push_back(Undef);

3479 }

3480 }

3482 return true;

3483 }

3484

3485 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);

3486 MIRBuilder

3487 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},

3488 {getOrCreateVReg(*U.getOperand(0)),

3489 getOrCreateVReg(*U.getOperand(1))})

3490 .addShuffleMask(MaskAlloc);

3491 return true;

3492}
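// Illustrative example (not from the source): for fixed vectors the generic
// path above emits a single G_SHUFFLE_VECTOR, e.g.
//   %r = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 3>
// becomes G_SHUFFLE_VECTOR %a, %b with the mask (0, 3) attached via
// addShuffleMask.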

3493

3494bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {

3496

3497 SmallVector<MachineInstr *, 4> Insts;

3498 for (auto Reg : getOrCreateVRegs(PI)) {

3499 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});

3500 Insts.push_back(MIB.getInstr());

3501 }

3502

3503 PendingPHIs.emplace_back(&PI, std::move(Insts));

3504 return true;

3505}

3506

3507bool IRTranslator::translateAtomicCmpXchg(const User &U,

3510

3511 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);

3512

3513 auto Res = getOrCreateVRegs(I);

3514 Register OldValRes = Res[0];

3515 Register SuccessRes = Res[1];

3516 Register Addr = getOrCreateVReg(*I.getPointerOperand());

3517 Register Cmp = getOrCreateVReg(*I.getCompareOperand());

3518 Register NewVal = getOrCreateVReg(*I.getNewValOperand());

3519

3521 OldValRes, SuccessRes, Addr, Cmp, NewVal,

3522 *MF->getMachineMemOperand(

3523 MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),

3524 getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),

3525 I.getSuccessOrdering(), I.getFailureOrdering()));

3526 return true;

3527}
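// Illustrative example (not from the source):
//   %pair = cmpxchg ptr %p, i32 %cmp, i32 %new seq_cst seq_cst
// is lowered by the code above to G_ATOMIC_CMPXCHG_WITH_SUCCESS, whose two
// results (old value, success flag) land in Res[0] and Res[1], with both
// orderings recorded on the MachineMemOperand.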

3528

3529bool IRTranslator::translateAtomicRMW(const User &U,

3532 return false;

3533

3535 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);

3536

3537 Register Res = getOrCreateVReg(I);

3538 Register Addr = getOrCreateVReg(*I.getPointerOperand());

3539 Register Val = getOrCreateVReg(*I.getValOperand());

3540

3541 unsigned Opcode = 0;

3542 switch (I.getOperation()) {

3543 default:

3544 return false;

3546 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;

3547 break;

3549 Opcode = TargetOpcode::G_ATOMICRMW_ADD;

3550 break;

3552 Opcode = TargetOpcode::G_ATOMICRMW_SUB;

3553 break;

3555 Opcode = TargetOpcode::G_ATOMICRMW_AND;

3556 break;

3558 Opcode = TargetOpcode::G_ATOMICRMW_NAND;

3559 break;

3561 Opcode = TargetOpcode::G_ATOMICRMW_OR;

3562 break;

3564 Opcode = TargetOpcode::G_ATOMICRMW_XOR;

3565 break;

3567 Opcode = TargetOpcode::G_ATOMICRMW_MAX;

3568 break;

3570 Opcode = TargetOpcode::G_ATOMICRMW_MIN;

3571 break;

3573 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;

3574 break;

3576 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;

3577 break;

3579 Opcode = TargetOpcode::G_ATOMICRMW_FADD;

3580 break;

3582 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;

3583 break;

3585 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;

3586 break;

3588 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;

3589 break;

3591 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;

3592 break;

3594 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;

3595 break;

3597 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;

3598 break;

3600 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;

3601 break;

3603 Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;

3604 break;

3606 Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;

3607 break;

3608 }

3609

3611 Opcode, Res, Addr, Val,

3612 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),

3613 Flags, MRI->getType(Val), getMemOpAlign(I),

3614 I.getAAMetadata(), nullptr, I.getSyncScopeID(),

3615 I.getOrdering()));

3616 return true;

3617}
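// Illustrative example (not from the source):
//   atomicrmw add ptr %p, i32 %v seq_cst
// maps through the switch above to G_ATOMICRMW_ADD; the seq_cst ordering and
// sync scope live on the attached MachineMemOperand, not in the opcode.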

3618

3619bool IRTranslator::translateFence(const User &U,

3620 MachineIRBuilder &MIRBuilder) {

3621 const FenceInst &Fence = cast<FenceInst>(U);

3622 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),

3623 Fence.getSyncScopeID());

3624 return true;

3625}

3626

3627bool IRTranslator::translateFreeze(const User &U,

3628 MachineIRBuilder &MIRBuilder) {

3629 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);

3630 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));

3631

3632 assert(DstRegs.size() == SrcRegs.size() &&

3633 "Freeze with different source and destination type?");

3634

3635 for (unsigned I = 0; I < DstRegs.size(); ++I) {

3636 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);

3637 }

3638

3639 return true;

3640}

3641

3642void IRTranslator::finishPendingPhis() {

3643#ifndef NDEBUG

3644 DILocationVerifier Verifier;

3645 GISelObserverWrapper WrapperObserver(&Verifier);

3646 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);

3647#endif

3648 for (auto &Phi : PendingPHIs) {

3649 const PHINode *PI = Phi.first;

3651 continue;

3653 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();

3654 EntryBuilder->setDebugLoc(PI->getDebugLoc());

3655#ifndef NDEBUG

3656 Verifier.setCurrentInst(PI);

3657#endif

3658

3659 SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;

3663 for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {

3665 continue;

3666 SeenPreds.insert(Pred);

3667 for (unsigned j = 0; j < ValRegs.size(); ++j) {

3668 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);

3669 MIB.addUse(ValRegs[j]);

3671 }

3672 }

3673 }

3674 }

3675}

3676

3677void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,

3683 "Expected inlined-at fields to agree");

3684

3686

3687 if (!V || HasArgList) {

3688

3689

3691 return;

3692 }

3693

3696 return;

3697 }

3698

3701

3702

3703

3704 auto ExprOperands = Expression->getElements();

3705 auto *ExprDerefRemoved =

3707 MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), Variable,

3708 ExprDerefRemoved);

3709 return;

3710 }

3711 if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,

3712 MIRBuilder))

3713 return;

3714 for (Register Reg : getOrCreateVRegs(*V)) {

3715

3716

3717

3718

3720 }

3721}

3722

3723void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,

3729 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");

3730 return;

3731 }

3732

3734 "Expected inlined-at fields to agree");

3737

3738

3739 MF->setVariableDbgInfo(Variable, Expression,

3740 getOrCreateFrameIndex(*AI), DL);

3741 return;

3742 }

3743

3744 if (translateIfEntryValueArgument(true, Address, Variable,

3745 Expression, DL,

3746 MIRBuilder))

3747 return;

3748

3749

3750

3753 Expression);

3754}

3755

3756void IRTranslator::translateDbgInfo(const Instruction &Inst,

3760 MIRBuilder.setDebugLoc(DLR->getDebugLoc());

3761 assert(DLR->getLabel() && "Missing label");

3762 assert(DLR->getLabel()->isValidLocationForIntrinsic(

3764 "Expected inlined-at fields to agree");

3766 continue;

3767 }

3770 const DIExpression *Expression = DVR.getExpression();

3773 translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,

3775 else

3776 translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,

3778 }

3779}

3780

3781bool IRTranslator::translate(const Instruction &Inst) {

3783 CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));

3784 CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));

3785

3786 if (TLI->fallBackToDAGISel(Inst))

3787 return false;

3788

3790#define HANDLE_INST(NUM, OPCODE, CLASS) \

3791 case Instruction::OPCODE: \

3792 return translate##OPCODE(Inst, *CurBuilder.get());

3793#include "llvm/IR/Instruction.def"

3794 default:

3795 return false;

3796 }

3797}

3798

3800

3801

3802 if (auto CurrInstDL = CurBuilder->getDL())

3803 EntryBuilder->setDebugLoc(DebugLoc());

3804

3806

3809 EntryBuilder->buildConstant(Reg, *CI);

3811

3813 CF = ConstantFP::get(CF->getContext(), CF->getValue());

3814 EntryBuilder->buildFConstant(Reg, *CF);

3816 EntryBuilder->buildUndef(Reg);

3818 EntryBuilder->buildConstant(Reg, 0);

3820 EntryBuilder->buildGlobalValue(Reg, GV);

3822 Register Addr = getOrCreateVReg(*CPA->getPointer());

3823 Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());

3824 EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);

3826 Constant &Elt = *CAZ->getElementValue(0u);

3828 EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));

3829 return true;

3830 }

3831

3832 unsigned NumElts = CAZ->getElementCount().getFixedValue();

3833 if (NumElts == 1)

3834 return translateCopy(C, Elt, *EntryBuilder);

3835

3836 EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));

3838

3839 if (CV->getNumElements() == 1)

3840 return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);

3842 for (unsigned i = 0; i < CV->getNumElements(); ++i) {

3843 Constant &Elt = *CV->getElementAsConstant(i);

3844 Ops.push_back(getOrCreateVReg(Elt));

3845 }

3846 EntryBuilder->buildBuildVector(Reg, Ops);

3848 switch(CE->getOpcode()) {

3849#define HANDLE_INST(NUM, OPCODE, CLASS) \

3850 case Instruction::OPCODE: \

3851 return translate##OPCODE(*CE, *EntryBuilder.get());

3852#include "llvm/IR/Instruction.def"

3853 default:

3854 return false;

3855 }

3857 if (CV->getNumOperands() == 1)

3858 return translateCopy(C, *CV->getOperand(0), *EntryBuilder);

3860 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {

3861 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));

3862 }

3863 EntryBuilder->buildBuildVector(Reg, Ops);

3865 EntryBuilder->buildBlockAddress(Reg, BA);

3866 } else

3867 return false;

3868

3869 return true;

3870}

3871

3872bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,

3874 for (auto &BTB : SL->BitTestCases) {

3875

3876 if (!BTB.Emitted)

3877 emitBitTestHeader(BTB, BTB.Parent);

3878

3879 BranchProbability UnhandledProb = BTB.Prob;

3880 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {

3881 UnhandledProb -= BTB.Cases[j].ExtraProb;

3882

3883 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;

3884

3885

3886

3887

3888

3889

3890

3891

3892 MachineBasicBlock *NextMBB;

3893 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {

3894

3895

3896 NextMBB = BTB.Cases[j + 1].TargetBB;

3897 } else if (j + 1 == ej) {

3898

3899 NextMBB = BTB.Default;

3900 } else {

3901

3902 NextMBB = BTB.Cases[j + 1].ThisBB;

3903 }

3904

3905 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

3906

3907 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {

3908

3909

3910

3911 addMachineCFGPred({BTB.Parent->getBasicBlock(),

3912 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},

3914

3915 BTB.Cases.pop_back();

3916 break;

3917 }

3918 }

3919

3920

3921 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),

3922 BTB.Default->getBasicBlock()};

3923 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);

3924 if (!BTB.ContiguousRange) {

3925 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);

3926 }

3927 }

3928 SL->BitTestCases.clear();

3929

3930 for (auto &JTCase : SL->JTCases) {

3931

3932 if (!JTCase.first.Emitted)

3933 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

3934

3935 emitJumpTable(JTCase.second, JTCase.second.MBB);

3936 }

3937 SL->JTCases.clear();

3938

3939 for (auto &SwCase : SL->SwitchCases)

3940 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);

3941 SL->SwitchCases.clear();

3942

3943

3945 if (SP.shouldEmitSDCheck(BB)) {

3946 bool FunctionBasedInstrumentation =

3947 TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());

3948 SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);

3949 }

3950

3951 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {

3952 LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");

3953 return false;

3954 } else if (SPDescriptor.shouldEmitStackProtector()) {

3955 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();

3956 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();

3957

3958

3959

3960

3961

3962

3963

3965 ParentMBB, *MF->getSubtarget().getInstrInfo());

3966

3967

3968 SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,

3969 ParentMBB->end());

3970

3971

3972 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))

3973 return false;

3974

3975

3976 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();

3977 if (FailureMBB->empty()) {

3978 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))

3979 return false;

3980 }

3981

3982

3983 SPDescriptor.resetPerBBState();

3984 }

3985 return true;

3986}

3987

3990 CurBuilder->setInsertPt(*ParentBB, ParentBB->end());

3991

3994 LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));

3995

3998

4000 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);

4003

4004

4006 CurBuilder

4007 ->buildLoad(PtrMemTy, StackSlotPtr,

4010 .getReg(0);

4011

4012 if (TLI->useStackGuardXorFP()) {

4013 LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");

4014 return false;

4015 }

4016

4017

4018 if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {

4019

4020

4021

4022

4023

4024 (void)GuardCheckFn;

4025 return false;

4026#if 0

4027

4028

4029

4030 FunctionType *FnTy = GuardCheckFn->getFunctionType();

4031 assert(FnTy->getNumParams() == 1 && "Invalid function signature");

4032 ISD::ArgFlagsTy Flags;

4033 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))

4034 Flags.setInReg();

4035 CallLowering::ArgInfo GuardArgInfo(

4036 {GuardVal, FnTy->getParamType(0), {Flags}});

4037

4038 CallLowering::CallLoweringInfo Info;

4039 Info.OrigArgs.push_back(GuardArgInfo);

4040 Info.CallConv = GuardCheckFn->getCallingConv();

4042 Info.OrigRet = {Register(), FnTy->getReturnType()};

4043 if (!CLI->lowerCall(MIRBuilder, Info)) {

4044 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");

4045 return false;

4046 }

4047 return true;

4048#endif

4049 }

4050

4051

4052

4054 Guard =

4056 getStackGuard(Guard, *CurBuilder);

4057 } else {

4058

4059 const Value *IRGuard = TLI->getSDagStackGuard(M);

4060 Register GuardPtr = getOrCreateVReg(*IRGuard);

4061

4062 Guard = CurBuilder

4063 ->buildLoad(PtrMemTy, GuardPtr,

4067 .getReg(0);

4068 }

4069

4070

4071 auto Cmp =

4073

4074 CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());

4075

4077 return true;

4078}

4079

4082 CurBuilder->setInsertPt(*FailureBB, FailureBB->end());

4083

4084 const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;

4085 const char *Name = TLI->getLibcallName(Libcall);

4086

4087 CallLowering::CallLoweringInfo Info;

4088 Info.CallConv = TLI->getLibcallCallingConv(Libcall);

4091 0};

4092 if (!CLI->lowerCall(*CurBuilder, Info)) {

4093 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");

4094 return false;

4095 }

4096

4097

4098 const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;

4099 if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)

4100 CurBuilder->buildInstr(TargetOpcode::G_TRAP);

4101

4102 return true;

4103}
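// Note (background, not from the source): RTLIB::STACKPROTECTOR_CHECK_FAIL
// conventionally resolves to the noreturn __stack_chk_fail runtime routine;
// the G_TRAP built above guards against control falling through that call
// when TrapUnreachable is in effect.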

4104

4105void IRTranslator::finalizeFunction() {

4106

4107

4108 PendingPHIs.clear();

4109 VMap.reset();

4110 FrameIndices.clear();

4111 MachinePreds.clear();

4112

4113

4114

4115 EntryBuilder.reset();

4116 CurBuilder.reset();

4117 FuncInfo.clear();

4118 SPDescriptor.resetPerFunctionState();

4119}

4120

4121

4122

4124 if (!IsVarArg)

4125 return false;

4126

4127

4128

4131 return CI && CI->isMustTailCall();

4132 });

4133}

4134

4136 MF = &CurMF;

4137 const Function &F = MF->getFunction();

4140

4145 : TPC->isGISelCSEEnabled();

4146 TLI = MF->getSubtarget().getTargetLowering();

4147

4148 if (EnableCSE) {

4149 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);

4150 CSEInfo = &Wrapper.get(TPC->getCSEConfig());

4151 EntryBuilder->setCSEInfo(CSEInfo);

4152 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);

4153 CurBuilder->setCSEInfo(CSEInfo);

4154 } else {

4155 EntryBuilder = std::make_unique<MachineIRBuilder>();

4156 CurBuilder = std::make_unique<MachineIRBuilder>();

4157 }
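// Note (background, not from the source): with CSE enabled, both builders are
// CSEMIRBuilder instances sharing one GISelCSEInfo, so identical generic
// instructions built during translation fold to a single definition; the
// plain MachineIRBuilder path skips that bookkeeping.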

4158 CLI = MF->getSubtarget().getCallLowering();

4159 CurBuilder->setMF(*MF);

4160 EntryBuilder->setMF(*MF);

4161 MRI = &MF->getRegInfo();

4162 DL = &F.getDataLayout();

4163 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);

4167 FuncInfo.MF = MF;

4168 if (EnableOpts) {

4171 } else {

4172 AA = nullptr;

4173 FuncInfo.BPI = nullptr;

4174 }

4175

4177 MF->getFunction());

4179 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

4180

4181 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);

4182 SL->init(*TLI, TM, *DL);

4183

4184 assert(PendingPHIs.empty() && "stale PHIs");

4185

4186

4187

4188 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {

4189

4191 F.getSubprogram(), &F.getEntryBlock());

4192 R << "unable to translate in big endian mode";

4194 return false;

4195 }

4196

4197

4198 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

4199

4200

4203 EntryBuilder->setMBB(*EntryBB);

4204

4205 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();

4206 SwiftError.setFunction(CurMF);

4207 SwiftError.createEntriesInEntryBlock(DbgLoc);

4208

4209 bool IsVarArg = F.isVarArg();

4210 bool HasMustTailInVarArgFn = false;

4211

4212

4213 FuncInfo.MBBMap.resize(F.getMaxBlockNumber());

4215 auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];

4216

4217 MBB = MF->CreateMachineBasicBlock(&BB);

4218 MF->push_back(MBB);

4219

4221 MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

4222

4223 if (!HasMustTailInVarArgFn)

4225 }

4226

4227 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

4228

4229

4230 EntryBB->addSuccessor(&getMBB(F.front()));

4231

4232 if (CLI->fallBackToDAGISel(*MF)) {

4234 F.getSubprogram(), &F.getEntryBlock());

4235 R << "unable to lower function: "

4236 << ore::NV("Prototype", F.getFunctionType());

4238 return false;

4239 }

4240

4241

4243 for (const Argument &Arg: F.args()) {

4244 if (DL->getTypeStoreSize(Arg.getType()).isZero())

4245 continue;

4248

4249 if (Arg.hasSwiftErrorAttr()) {

4250 assert(VRegs.size() == 1 && "Too many vregs for Swift error");

4251 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);

4252 }

4253 }

4254

4255 if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {

4257 F.getSubprogram(), &F.getEntryBlock());

4258 R << "unable to lower arguments: "

4259 << ore::NV("Prototype", F.getFunctionType());

4261 return false;

4262 }

4263

4264

4266 if (EnableCSE && CSEInfo)

4268 {

4270#ifndef NDEBUG

4271 DILocationVerifier Verifier;

4273#endif

4277

4278

4279 CurBuilder->setMBB(MBB);

4280 HasTailCall = false;

4282

4283

4284

4285

4286

4287 if (HasTailCall)

4288 break;

4289#ifndef NDEBUG

4290 Verifier.setCurrentInst(&Inst);

4291#endif

4292

4293

4294 translateDbgInfo(Inst, *CurBuilder);

4295

4296 if (translate(Inst))

4297 continue;

4298

4301 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

4302

4303 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {

4304 std::string InstStrStorage;

4306 InstStr << Inst;

4307

4308 R << ": '" << InstStrStorage << "'";

4309 }

4310

4312 return false;

4313 }

4314

4315 if (!finalizeBasicBlock(*BB, MBB)) {

4317 BB->getTerminator()->getDebugLoc(), BB);

4318 R << "unable to translate basic block";

4320 return false;

4321 }

4322 }

4323#ifndef NDEBUG

4325#endif

4326 }

4327

4328 finishPendingPhis();

4329

4330 SwiftError.propagateVRegs();

4331

4332

4333

4334

4335 assert(EntryBB->succ_size() == 1 &&

4336 "Custom BB used for lowering should have only one successor");

4337

4340 "LLVM-IR entry block has a predecessor!?");

4341

4342

4343 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),

4344 EntryBB->end());

4345

4346

4350

4351

4352 EntryBB->removeSuccessor(&NewEntryBB);

4353 MF->remove(EntryBB);

4354 MF->deleteMachineBasicBlock(EntryBB);

4355

4356 assert(&MF->front() == &NewEntryBB &&

4357 "New entry wasn't next in the list of basic block!");

4358

4359

4361 SP.copyToMachineFrameInfo(MF->getFrameInfo());

4362

4363 return false;

4364}

unsigned const MachineRegisterInfo * MRI

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

const TargetInstrInfo & TII

amdgpu aa AMDGPU Address space based Alias Analysis Wrapper

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

Analysis containing CSE Info

Provides analysis for continuously CSEing during GISel passes.

This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.

This file describes how to lower LLVM calls to machine code calls.

This file contains the declarations for the subclasses of Constant, which represent the different fla...

This contains common code to allow clients to notify changes to machine instr.

static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)

Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.

Definition IRTranslator.cpp:4123

static bool targetSupportsBF16Type(const MachineFunction *MF)

Definition IRTranslator.cpp:298

static bool containsBF16Type(const User &U)

Definition IRTranslator.cpp:302

static unsigned getConvOpcode(Intrinsic::ID ID)

Definition IRTranslator.cpp:2161

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)

Definition IRTranslator.cpp:1468

static unsigned getConstrainedOpcode(Intrinsic::ID ID)

Definition IRTranslator.cpp:2074

IRTranslator LLVM IR MI

Definition IRTranslator.cpp:110

IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)

Definition IRTranslator.cpp:113

static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))

static bool isValInBlock(const Value *V, const BasicBlock *BB)

Definition IRTranslator.cpp:431

static bool isSwiftError(const Value *V)

Definition IRTranslator.cpp:1372

This file declares the IRTranslator pass.

This file provides various utilities for inspecting and working with the control flow graph in LLVM I...

This file describes how to lower LLVM inline asm to machine code INLINEASM.

const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]

Implement a low-level type suitable for MachineInstr level instruction selection.

Implement a low-level type suitable for MachineInstr level instruction selection.

Machine Check Debug Module

This file declares the MachineIRBuilder class.

Register const TargetRegisterInfo * TRI

Promote Memory to Register

OptimizedStructLayoutField Field

#define INITIALIZE_PASS_DEPENDENCY(depName)

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)

This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.

const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB

const SmallVectorImpl< MachineOperand > & Cond

std::pair< BasicBlock *, BasicBlock * > Edge

verify safepoint Safepoint IR Verifier

This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...

This file defines the SmallVector class.

This file describes how to lower LLVM code to machine code.

Target-Independent Code Generator Pass Configuration Options pass.

A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.

LLVM_ABI APInt zextOrTrunc(unsigned width) const

Zero extend or truncate to width.

an instruction to allocate memory on the stack

bool isSwiftError() const

Return true if this alloca is used as a swifterror argument to a call.

LLVM_ABI bool isStaticAlloca() const

Return true if this alloca is in the entry block of the function and is a constant size.

Align getAlign() const

Return the alignment of the memory that is being allocated by the instruction.

PointerType * getType() const

Overload to return most specific pointer type.

Type * getAllocatedType() const

Return the type that is being allocated by the instruction.

const Value * getArraySize() const

Get the number of elements allocated.

Represent the analysis usage information of a pass.

AnalysisUsage & addRequired()

AnalysisUsage & addPreserved()

Add the specified Pass class to the set of analyses preserved by this pass.

This class represents an incoming formal argument to a Function.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

An immutable pass that tracks lazily created AssumptionCache objects.

@ USubCond

Subtract only if no unsigned overflow.

@ FMinimum

*p = minimum(old, v) minimum matches the behavior of llvm.minimum.

@ Min

*p = old <signed v ? old : v

@ USubSat

*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.

@ FMaximum

*p = maximum(old, v) maximum matches the behavior of llvm.maximum.

@ UIncWrap

Increment one up to a maximum value.

@ Max

*p = old >signed v ? old : v

@ UMin

*p = old <unsigned v ? old : v

@ FMin

*p = minnum(old, v) minnum matches the behavior of llvm.minnum.

@ UMax

*p = old >unsigned v ? old : v

@ FMax

*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.

@ UDecWrap

Decrement one until a minimum value or zero.

LLVM Basic Block Representation.

unsigned getNumber() const

const Function * getParent() const

Return the enclosing method, or null if none.

bool hasAddressTaken() const

Returns true if there are any uses of this basic block other than direct branches,...

LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const

Returns an iterator to the first instruction in this block that is not a PHINode instruction.

InstListType::const_iterator const_iterator

LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const

Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...

LLVM_ABI const Module * getModule() const

Return the module owning the function this basic block belongs to, or nullptr if the function does no...

Legacy analysis pass which computes BlockFrequencyInfo.

BasicBlock * getSuccessor(unsigned i) const

bool isUnconditional() const

Value * getCondition() const

Legacy analysis pass which computes BranchProbabilityInfo.

LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const

Get an edge's probability, relative to other out-edges of the Src.

static BranchProbability getOne()

static BranchProbability getZero()

static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

bool isInlineAsm() const

Check if this call is an inline asm statement.

std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const

Return an operand bundle by name, if present.

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signa...

LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

unsigned countOperandBundlesOfType(StringRef Name) const

Return the number of operand bundles with the tag Name attached to this instruction.

Value * getCalledOperand() const

Value * getArgOperand(unsigned i) const

User::op_iterator arg_end()

Return the iterator pointing to the end of the argument list.

bool isConvergent() const

Determine if the invoke is convergent.

LLVM_ABI Intrinsic::ID getIntrinsicID() const

Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...

iterator_range< User::op_iterator > args()

Iteration adapter for range-for loops.

unsigned arg_size() const

AttributeList getAttributes() const

Return the attributes for this call.

This class represents a function call, abstracting a target machine's calling convention.

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

@ FCMP_TRUE

1 1 1 1 Always true (always folded)

@ ICMP_SLT

signed less than

@ ICMP_SLE

signed less or equal

@ ICMP_UGT

unsigned greater than

@ ICMP_ULE

unsigned less or equal

@ FCMP_FALSE

0 0 0 0 Always false (always folded)

bool isFPPredicate() const

bool isIntPredicate() const

static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)

bool isZero() const

This is just a convenience method to make client code smaller for a common code.

unsigned getBitWidth() const

getBitWidth - Return the scalar bitwidth of this constant.

uint64_t getZExtValue() const

Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...

const APInt & getValue() const

Return the constant as an APInt value reference.

This is an important base class in LLVM.

static LLVM_ABI Constant * getAllOnesValue(Type *Ty)

static LLVM_ABI Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

This is the common base class for constrained floating point intrinsics.

LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const

LLVM_ABI unsigned getNonMetadataArgCount() const

LLVM_ABI bool isEntryValue() const

Check if the expression consists of exactly one entry value operand.

static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)

Append the opcodes Ops to DIExpr.

LLVM_ABI bool startsWithDeref() const

Return whether the first element a DW_OP_deref.

ArrayRef< uint64_t > getElements() const

bool isValidLocationForIntrinsic(const DILocation *DL) const

Check that a location is valid for this label.

A parsed version of the target data layout string in and methods for querying it.

Value * getAddress() const

DILabel * getLabel() const

DebugLoc getDebugLoc() const

Value * getValue(unsigned OpIdx=0) const

DILocalVariable * getVariable() const

DIExpression * getExpression() const

LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const

DIExpression * getExpression() const

DILocalVariable * getVariable() const

bool isDbgDeclare() const

Class representing an expression and its matching format.

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this fence instruction.

AtomicOrdering getOrdering() const

Returns the ordering constraint of this fence instruction.

static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)

bool skipFunction(const Function &F) const

Optional passes call this function to check whether the pass should be skipped.

bool hasMinSize() const

Optimize this function for minimum size (-Oz).

Constant * getPersonalityFn() const

Get the personality function associated with this function.

const Function & getFunction() const

bool isIntrinsic() const

isIntrinsic - Returns true if the function's name starts with "llvm.".

The actual analysis pass wrapper.

Simple wrapper that does the following.

Abstract class that contains various methods for clients to notify about changes.

Simple wrapper observer that takes several observers, and calls each one for each event.

void removeObserver(GISelChangeObserver *O)

void addObserver(GISelChangeObserver *O)

static StringRef dropLLVMManglingEscape(StringRef Name)

If the given string begins with the GlobalValue name mangling escape character '\1',...

bool hasExternalWeakLinkage() const

bool hasDLLImportStorageClass() const

Module * getParent()

Get the module that this global value is contained inside of...

bool runOnMachineFunction(MachineFunction &MF) override

runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...

Definition IRTranslator.cpp:4135

IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)

Definition IRTranslator.cpp:131

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...

Definition IRTranslator.cpp:173

bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const

Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...

This instruction inserts a struct field of array element value into an aggregate value.

iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const

Return a range over the DbgRecords attached to this instruction.

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

LLVM_ABI const Module * getModule() const

Return the module owning the function this instruction belongs to or nullptr it the function does not...

bool hasMetadata() const

Return true if this instruction has any metadata attached to it.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

LLVM_ABI AAMDNodes getAAMetadata() const

Returns the AA metadata for this instruction.

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

LLVM_ABI bool hasAllowReassoc() const LLVM_READONLY

Determine whether the allow-reassociation flag is set.

Intrinsic::ID getIntrinsicID() const

Return the intrinsic ID of this intrinsic.

constexpr LLT changeElementType(LLT NewEltTy) const

If this type is a vector, return a vector with the same number of elements but the new element type.

static constexpr LLT scalar(unsigned SizeInBits)

Get a low-level scalar or aggregate "bag of bits".

constexpr uint16_t getNumElements() const

Returns the number of elements in a vector LLT.

constexpr bool isVector() const

static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)

Get a low-level pointer in the given address space.

constexpr TypeSize getSizeInBits() const

Returns the total size of the type. Must only be called on sized types.

constexpr bool isPointer() const

static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)

Get a low-level fixed-width vector of some number of elements and element width.

constexpr bool isFixedVector() const

Returns true if the LLT is a fixed vector.

LLVM_ABI void diagnose(const DiagnosticInfo &DI)

Report a message to the currently installed diagnostic handler.

Value * getPointerOperand()

AtomicOrdering getOrdering() const

Returns the ordering constraint of this load instruction.

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this load instruction.

static LocationSize precise(uint64_t Value)

static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)

unsigned pred_size() const

void normalizeSuccProbs()

Normalize probabilities of all successors so that the sum of them becomes one.

LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)

Insert MI into the instruction list before I, possibly inside a bundle.

void push_back(MachineInstr *MI)

const BasicBlock * getBasicBlock() const

Return the LLVM basic block that this instance corresponded to originally.

LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)

Set successor probability of a given iterator.

succ_iterator succ_begin()

LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())

Add Succ as a successor of this MachineBasicBlock.

SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator

LLVM_ABI void sortUniqueLiveIns()

Sorts and uniques the LiveIns vector.

LLVM_ABI bool isPredecessor(const MachineBasicBlock *MBB) const

Return true if the specified MBB is a predecessor of this block.

void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())

Adds the specified register as a live in.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

void splice(iterator Where, MachineBasicBlock *Other, iterator From)

Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...

MachineInstrBundleIterator< MachineInstr > iterator

void setIsEHPad(bool V=true)

Indicates the block is a landing pad.

int getStackProtectorIndex() const

Return the index for the stack protector object.

MachineFunctionPass(char &ID)

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

Function & getFunction()

Return the LLVM function that this machine code represents.

BasicBlockListType::iterator iterator

MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)

CreateMachineInstr - Allocate a new MachineInstr.

void insert(iterator MBBI, MachineBasicBlock *MBB)

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with

Helper class to build MachineInstr.

MachineInstrBuilder buildFPTOUI_SAT(const DstOp &Dst, const SrcOp &Src0)

Build and insert Res = G_FPTOUI_SAT Src0.

MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)

Build and insert Dst = G_FREEZE Src.

MachineInstrBuilder buildBr(MachineBasicBlock &Dest)

Build and insert G_BR Dest.

MachineInstrBuilder buildModf(const DstOp &Fract, const DstOp &Int, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)

Build and insert Fract, Int = G_FMODF Src.

LLVMContext & getContext() const

MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_ADD Op0, Op1.

MachineInstrBuilder buildUndef(const DstOp &Res)

Build and insert Res = IMPLICIT_DEF.

MachineInstrBuilder buildResetFPMode()

Build and insert G_RESET_FPMODE.

MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FPEXT Op.

MachineInstrBuilder buildFPTOSI_SAT(const DstOp &Dst, const SrcOp &Src0)

Build and insert Res = G_FPTOSI_SAT Src0.

MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)

Build and insert a Res = G_UCMP Op0, Op1.

MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)

Build and insert Res = G_JUMP_TABLE JTI.

MachineInstrBuilder buildGetRounding(const DstOp &Dst)

Build and insert Dst = G_GET_ROUNDING.

MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)

Build and insert a Res = G_SCMP Op0, Op1.

MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)

Build and insert G_FENCE Ordering, Scope.

MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert a Res = G_SELECT Tst, Op0, Op1.

MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FMA Op0, Op1, Op2.

MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_MUL Op0, Op1.

MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)

Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.

MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)

Build and insert Res = G_AND Op0, Op1.

MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)

Build and insert an appropriate cast between two registers of equal size.

MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert a Res = G_ICMP Pred, Op0, Op1.

MachineBasicBlock::iterator getInsertPt()

Current insertion point for new instructions.

MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)

Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...

MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)

Build and insert OldValRes = G_ATOMICRMW_ Addr, Val, MMO.

MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_SUB Op0, Op1.

MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)

Build and insert a G_INTRINSIC instruction.

MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)

Build and insert Res = G_VSCALE MinElts.

MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)

Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.

MachineInstrBuilder buildSetFPMode(const SrcOp &Src)

Build and insert G_SET_FPMODE Src.

MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...

MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)

Build and insert Res = G_BUILD_VECTOR Op0, ...

MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...

MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)

Build and insert G_BRCOND Tst, Dest.

std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)

Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...

MachineInstrBuilder buildSetRounding(const SrcOp &Src)

Build and insert G_SET_ROUNDING.

MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)

Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.

MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert Res = G_LOAD Addr, MMO.

MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_PTR_ADD Op0, Op1.

MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)

Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...

MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)

Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.

MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_SHL Op0, Op1.

MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert G_STORE Val, Addr, MMO.

MachineInstrBuilder buildInstr(unsigned Opcode)

Build and insert <empty> = Opcode <empty>.

MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)

Build and insert Res = G_FRAME_INDEX Idx.

MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...

MachineInstrBuilder buildDbgLabel(const MDNode *Label)

Build and insert a DBG_LABEL instruction specifying that Label is given.

MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)

Build and insert G_BRJT TablePtr, JTI, IndexReg.

MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)

Build and insert Res = G_DYN_STACKALLOC Size, Align.

MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...

MachineInstrBuilder buildResetFPEnv()

Build and insert G_RESET_FPENV.

void setDebugLoc(const DebugLoc &DL)

Set the debug location to DL for all the next build instructions.

const MachineBasicBlock & getMBB() const

Getter for the basic block we currently build.

MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)

Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.

MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)

Build and insert OldValRes, SuccessRes = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...

void setMBB(MachineBasicBlock &MBB)

Set the insertion point to the end of MBB.

const DebugLoc & getDebugLoc()

Get the current instruction's debug location.

MachineInstrBuilder buildTrap(bool Debug=false)

Build and insert G_TRAP or G_DEBUGTRAP.

MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)

Build and insert Fract, Exp = G_FFREXP Src.

MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FPTRUNC Op.

MachineInstrBuilder buildFSincos(const DstOp &Sin, const DstOp &Cos, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)

Build and insert Sin, Cos = G_FSINCOS Src.

MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)

Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.

MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)

Build but don't insert <empty> = Opcode <empty>.

MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)

Build and insert Res = COPY Op.

MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)

Build and insert G_PREFETCH Addr, RW, Locality, CacheType.

MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)

Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.

const DataLayout & getDataLayout() const

MachineInstrBuilder buildBrIndirect(Register Tgt)

Build and insert G_BRINDIRECT Tgt.

MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)

Build and insert Res = G_SPLAT_VECTOR Val.

MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)

Build and insert Res = G_STEP_VECTOR Step.

virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)

Build and insert Res = G_CONSTANT Val.
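
A minimal sketch using the int64_t convenience overload that MachineIRBuilder also provides (MIRBuilder is a hypothetical in-scope builder):

  // Materialize the 64-bit constant 42 in a fresh virtual register.
  auto FortyTwo = MIRBuilder.buildConstant(LLT::scalar(64), 42);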

MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert a Res = G_FCMP Pred, Op0, Op1.

MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FADD Op0, Op1.

MachineInstrBuilder buildSetFPEnv(const SrcOp &Src)

Build and insert G_SET_FPENV Src.

Register getReg(unsigned Idx) const

Get the register for the operand index.

const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & addMetadata(const MDNode *MD) const

const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const

const MachineInstrBuilder & addFrameIndex(int Idx) const

const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const

const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const

const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register use operand.

const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const

MachineInstr * getInstr() const

If conversion operators fail, use this method to get the MachineInstr explicitly.

const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register definition operand.
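
A minimal sketch of the raw operand interface, useful for opcodes without a dedicated helper; DstReg and SrcReg are assumed pre-created virtual registers (hypothetical names):

  // Definition operands are added before use operands.
  MIRBuilder.buildInstr(TargetOpcode::G_CTPOP)
      .addDef(DstReg)
      .addUse(SrcReg);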

LLVM_ABI void copyIRFlags(const Instruction &I)

Copy all IR flags to the MachineInstr's MIFlags.

static LLVM_ABI uint32_t copyFlagsFromInstruction(const Instruction &I)

LLVM_ABI void setDeactivationSymbol(MachineFunction &MF, Value *DS)

void setDebugLoc(DebugLoc DL)

Replace the current source location information with the given one.

Flags

Flags values. These may be or'd together.

@ MOVolatile

The memory access is volatile.

@ MODereferenceable

The memory access is dereferenceable (i.e., doesn't trap).

@ MOLoad

The memory access reads data.

@ MOInvariant

The memory access always returns the same value (or traps).

@ MOStore

The memory access writes data.

static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)

static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)

BasicBlock * getIncomingBlock(unsigned i) const

Return incoming basic block number i.

Value * getIncomingValue(unsigned i) const

Return incoming value number i.

unsigned getNumIncomingValues() const

Return the number of incoming edges.

AnalysisType & getAnalysis() const

getAnalysis() - This function is used by subclasses to get to the analysis information ...

static PointerType * getUnqual(Type *ElementType)

This constructs a pointer to an object of the specified type in the default address space (address sp...

Class to install both of the above.

Wrapper class representing virtual and physical registers.

Value * getReturnValue() const

Convenience accessor. Returns null if there is no return value.

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.
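
A minimal sketch of the usual deduplication idiom, assuming MBB is an in-scope MachineBasicBlock (hypothetical name):

  // insert() returns {iterator, bool}; the bool is true on first insertion.
  SmallPtrSet<const MachineBasicBlock *, 8> Visited;
  for (const MachineBasicBlock *Succ : successors(&MBB)) {
    if (!Visited.insert(Succ).second)
      continue; // already visited this successor
    // ... process Succ exactly once ...
  }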

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...

MachineBasicBlock * getSuccessMBB()

MachineBasicBlock * getFailureMBB()

constexpr bool empty() const

empty - Check if the string is empty.

constexpr const char * data() const

data - Get a pointer to the start of the string (which may not be null terminated).

virtual bool isTailCall(const MachineInstr &Inst) const

Determines whether Inst is a tail call instruction.

Primary interface to the complete machine description for the target machine.

const Triple & getTargetTriple() const

const Target & getTarget() const

void resetTargetOptions(const Function &F) const

Reset the target options based on the function's attributes.

unsigned NoTrapAfterNoreturn

Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...

unsigned TrapUnreachable

Emit target-specific trap instruction for 'unreachable' IR instructions.

FPOpFusion::FPOpFusionMode AllowFPOpFusion

AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.

Target-Independent Code Generator Pass Configuration Options.

bool isSPIRV() const

Tests whether the target is SPIR-V (32/64-bit/Logical).

Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM_ABI bool isEmptyTy() const

Return true if this type is empty, that is, it has no elements or all of its elements are empty.

static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)

static LLVM_ABI Type * getVoidTy(LLVMContext &C)

bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const

Return true if it makes sense to take the size of this type.

bool isAggregateType() const

Return true if the type is an aggregate type.

bool isTokenTy() const

Return true if this is 'token'.

bool isVoidTy() const

Return true if this is 'void'.

Value * getOperand(unsigned i) const

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

bool hasOneUse() const

Return true if there is exactly one use of this value.

LLVM_ABI const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

LLVM_ABI LLVMContext & getContext() const

All values hold a context through their type.

constexpr bool isZero() const

const ParentTy * getParent() const

NodeTy * getNextNode()

Get the next node, or nullptr for the list tail.

A raw_ostream that writes to an std::string.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Align[]

Key for Kernel::Arg::Metadata::mAlign.

constexpr char Args[]

Key for Kernel::Metadata::mArgs.

constexpr char SymbolName[]

Key for Kernel::Metadata::mSymbolName.

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

unsigned ID

LLVM IR allows the use of arbitrary numbers as calling convention identifiers.

@ C

The default llvm calling convention, compatible with C.

@ BasicBlock

Various leaf nodes.

@ Libcall

The operation should be implemented as a call to some kind of runtime support library.

BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)

Matches a register not-ed by a G_XOR.

OneUse_match< SubPat > m_OneUse(const SubPat &SP)

bool match(Val *V, const Pattern &P)

specificval_ty m_Specific(const Value *V)

Match if we have a specific specified value.

TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)

Matches ExtractElementInst.

auto m_LogicalOr()

Matches L || R where L and R are arbitrary values.

class_match< Value > m_Value()

Match an arbitrary value and ignore it.

auto m_LogicalAnd()

Matches L && R where L and R are arbitrary values.

@ Implicit

Not emitted register (e.g. carry, or temporary result).

@ Undef

Value of the register doesn't matter.

Offsets

Offsets in bytes from the start of the input buffer.

void sortAndRangeify(CaseClusterVector &Clusters)

Sort Clusters and merge adjacent cases.

std::vector< CaseCluster > CaseClusterVector

@ CC_Range

A cluster of adjacent case labels with the same destination, or just one case.

@ CC_JumpTable

A cluster of cases suitable for jump table lowering.

@ CC_BitTests

A cluster of cases suitable for bit test lowering.

SmallVector< SwitchWorkListItem, 4 > SwitchWorkList

CaseClusterVector::iterator CaseClusterIt

@ CE

Windows NT (Windows on ARM)

initializer< Ty > init(const Ty &Val)

ExceptionBehavior

Exception behavior used for floating point operations.

@ ebIgnore

This corresponds to "fpexcept.ignore".

DiagnosticInfoOptimizationBase::Argument NV

NodeAddr< PhiNode * > Phi

NodeAddr< CodeNode * > Code

friend class Instruction

Iterator for Instructions in a BasicBlock.

BaseReg

Stack frame base register. Bit 0 of FREInfo.Info.

This is an optimization pass for GlobalISel generic memory operations.

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

@ Low

Lower the current thread's priority such that it does not affect foreground tasks significantly.

detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...
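
A minimal sketch of iterating with indices attached (Names is a hypothetical container):

  SmallVector<StringRef, 4> Names = {"lhs", "rhs"};
  for (auto En : enumerate(Names))
    dbgs() << En.index() << ": " << En.value() << "\n";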

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

int countr_one(T Value)

Count the number of ones from the least significant bit to the first zero bit.

LLVM_ABI void diagnoseDontCall(const CallInst &CI)

auto successors(const MachineBasicBlock *BB)

LLVM_ABI MVT getMVTForLLT(LLT Ty)

Get a rough equivalent of an MVT for a given LLT.

void append_range(Container &C, Range &&R)

Wrapper function to append range R to container C.

constexpr bool isUIntN(unsigned N, uint64_t x)

Checks if an unsigned integer fits into the given (dynamic) bit width.

gep_type_iterator gep_type_end(const User *GEP)

MachineBasicBlock::iterator findSplitPointForStackProtector(MachineBasicBlock *BB, const TargetInstrInfo &TII)

Find the split point at which to splice the end of BB into its success stack protector check machine ...

LLVM_ABI LLT getLLTForMVT(MVT Ty)

Get a rough equivalent of an LLT for a given MVT.

constexpr int popcount(T Value) noexcept

Count the number of set bits in a value.

int countr_zero(T Val)

Count the number of 0s from the least significant bit upward, stopping at the first 1.

Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to infer an alignment for the specified pointer.

constexpr bool has_single_bit(T Value) noexcept

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)

Create a stride shuffle mask.

auto reverse(ContainerTy &&C)

void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)

computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
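
A minimal sketch of splitting an aggregate IR type into its leaf LLTs, assuming DL is a const DataLayout & and Val an IR Value (hypothetical names):

  SmallVector<LLT, 4> SplitTys;
  SmallVector<uint64_t, 4> Offsets; // offset of each leaf value
  computeValueLLTs(DL, *Val.getType(), SplitTys, &Offsets);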

void sort(IteratorTy Start, IteratorTy End)

LLVM_ABI raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)

generic_gep_type_iterator<> gep_type_iterator

auto succ_size(const MachineBasicBlock *BB)

LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)

See if the given exception handling personality function is one that we understand.

CodeGenOptLevel

Code generation optimization level.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

@ Success

The lock was released successfully.

LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key

@ Global

Append to llvm.global_dtors.

@ First

Helpers to iterate all locations in the MemoryEffectsBase class.

LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)

Modify analysis usage so it preserves passes required for the SelectionDAG fallback.

auto lower_bound(R &&Range, T &&Value)

Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...

LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)

Create an interleave shuffle mask.

@ Sub

Subtraction of integers.

DWARFExpression::Operation Op

ArrayRef(const T &OneElt) -> ArrayRef< T >

bool isAsynchronousEHPersonality(EHPersonality Pers)

Returns true if this personality function catches asynchronous exceptions.

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)

Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...

gep_type_iterator gep_type_begin(const User *GEP)

GlobalValue * ExtractTypeInfo(Value *V)

ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.

Align commonAlignment(Align A, uint64_t Offset)

Returns the alignment that satisfies both alignments.

LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL)

Construct a low-level type based on an LLVM type.
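
A minimal sketch, assuming MF is an in-scope MachineFunction and V a const Value * (hypothetical names):

  // Translate the IR type of a value into its low-level type.
  const DataLayout &DL = MF.getDataLayout();
  LLT Ty = getLLTForType(*V->getType(), DL);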

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This struct is a compact representation of a valid (non-zero power of two) alignment.

constexpr uint64_t value() const

This is a hole in the type system and should not be abused.

bool isSimple() const

Test if the given EVT is simple (as opposed to being extended).

MVT getSimpleVT() const

Return the SimpleValueType held in the specified simple EVT.

LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const

This method returns an LLVM type corresponding to the specified EVT.

Pair of physical register and lane mask.

static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.

MachineBasicBlock * Parent

This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...

BranchProbability TrueProb

MachineBasicBlock * ThisBB

struct PredInfoPair PredInfo

BranchProbability FalseProb

MachineBasicBlock * TrueBB

MachineBasicBlock * FalseBB

std::optional< unsigned > fallbackAddressSpace

MachineMemOperand::Flags flags

PointerUnion< const Value *, const PseudoSourceValue * > ptrVal

AtomicOrdering failureOrder