LLVM: lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

25#include "llvm/IR/IntrinsicsRISCV.h"

27

28#define DEBUG_TYPE "riscv-isel"

29

30using namespace llvm;

32

33#define GET_GLOBALISEL_PREDICATE_BITSET

34#include "RISCVGenGlobalISel.inc"

35#undef GET_GLOBALISEL_PREDICATE_BITSET

36

37namespace {

38

40public:

44

46

52 }

53

55

56private:

58 getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

59

60 static constexpr unsigned MaxRecursionDepth = 6;

61

63 const unsigned Depth = 0) const;

66 }

69 }

70

73

74

75

77

78

79

81

83

84

89 bool IsExternWeak = false) const;

97 unsigned &CurOp, bool IsMasked,

98 bool IsStridedOrIndexed,

99 LLT *IndexVT = nullptr) const;

100 bool selectIntrinsicWithSideEffects(MachineInstr &I,

103

104 ComplexRendererFns selectShiftMask(MachineOperand &Root,

105 unsigned ShiftWidth) const;

106 ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {

107 return selectShiftMask(Root, STI.getXLen());

108 }

109 ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {

110 return selectShiftMask(Root, 32);

111 }

112 ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

113

114 ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;

115 template

116 ComplexRendererFns selectSExtBits(MachineOperand &Root) const {

117 return selectSExtBits(Root, Bits);

118 }

119

120 ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;

121 template

122 ComplexRendererFns selectZExtBits(MachineOperand &Root) const {

123 return selectZExtBits(Root, Bits);

124 }

125

126 ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;

127 template

128 ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {

129 return selectSHXADDOp(Root, ShAmt);

130 }

131

132 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,

133 unsigned ShAmt) const;

134 template

135 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {

136 return selectSHXADD_UWOp(Root, ShAmt);

137 }

138

139 ComplexRendererFns renderVLOp(MachineOperand &Root) const;

140

141

143 int OpIdx) const;

145 int OpIdx) const;

147 int OpIdx) const;

149 int OpIdx) const;

151 int OpIdx) const;

152

154 int OpIdx) const;

157

159 int OpIdx) const;

161 int OpIdx) const;

162

168

170

171

172

173

175

176#define GET_GLOBALISEL_PREDICATES_DECL

177#include "RISCVGenGlobalISel.inc"

178#undef GET_GLOBALISEL_PREDICATES_DECL

179

180#define GET_GLOBALISEL_TEMPORARIES_DECL

181#include "RISCVGenGlobalISel.inc"

182#undef GET_GLOBALISEL_TEMPORARIES_DECL

183};

184

185}

186

187#define GET_GLOBALISEL_IMPL

188#include "RISCVGenGlobalISel.inc"

189#undef GET_GLOBALISEL_IMPL

190

191RISCVInstructionSelector::RISCVInstructionSelector(

194 : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),

195 TM(TM),

196

198#include "RISCVGenGlobalISel.inc"

201#include "RISCVGenGlobalISel.inc"

203{

204}

205

206

207bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,

208 unsigned Bits,

209 const unsigned Depth) const {

210

211 assert((MI.getOpcode() == TargetOpcode::G_ADD ||

212 MI.getOpcode() == TargetOpcode::G_SUB ||

213 MI.getOpcode() == TargetOpcode::G_MUL ||

214 MI.getOpcode() == TargetOpcode::G_SHL ||

215 MI.getOpcode() == TargetOpcode::G_LSHR ||

216 MI.getOpcode() == TargetOpcode::G_AND ||

217 MI.getOpcode() == TargetOpcode::G_OR ||

218 MI.getOpcode() == TargetOpcode::G_XOR ||

219 MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&

220 "Unexpected opcode");

221

222 if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)

223 return false;

224

225 auto DestReg = MI.getOperand(0).getReg();

226 for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {

227 assert(UserOp.getParent() && "UserOp must have a parent");

228 const MachineInstr &UserMI = *UserOp.getParent();

230

232 default:

233 return false;

234 case RISCV::ADDW:

235 case RISCV::ADDIW:

236 case RISCV::SUBW:

237 case RISCV::FCVT_D_W:

238 case RISCV::FCVT_S_W:

239 if (Bits >= 32)

240 break;

241 return false;

242 case RISCV::SLL:

243 case RISCV::SRA:

244 case RISCV::SRL:

245

247 break;

248 return false;

249 case RISCV::SLLI:

250

252 break;

253 return false;

254 case RISCV::ANDI:

257 break;

258 goto RecCheck;

259 case RISCV::AND:

260 case RISCV::OR:

261 case RISCV::XOR:

262 RecCheck:

264 break;

265 return false;

266 case RISCV::SRLI: {

268

269

270

272 break;

273 return false;

274 }

275 }

276 }

277

278 return true;

279}

280

281InstructionSelector::ComplexRendererFns

282RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,

283 unsigned ShiftWidth) const {

284 if (!Root.isReg())

285 return std::nullopt;

286

287 using namespace llvm::MIPatternMatch;

288

290

293 ShAmtReg = ZExtSrcReg;

294

295 APInt AndMask;

297

298

299

300

301

302

303

304

305

306

307

308

309

310

312 APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);

313 if (ShMask.isSubsetOf(AndMask)) {

314 ShAmtReg = AndSrcReg;

315 } else {

316

317

318 KnownBits Known = VT->getKnownBits(AndSrcReg);

319 if (ShMask.isSubsetOf(AndMask | Known.Zero))

320 ShAmtReg = AndSrcReg;

321 }

322 }

323

324 APInt Imm;

327 if (Imm != 0 && Imm.urem(ShiftWidth) == 0)

328

329

330 ShAmtReg = Reg;

332 if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {

333

334

335 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);

336 unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;

337 return {{[=](MachineInstrBuilder &MIB) {

338 MachineIRBuilder(*MIB.getInstr())

339 .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});

340 MIB.addReg(ShAmtReg);

341 }}};

342 }

343 if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {

344

345

346 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);

347 return {{[=](MachineInstrBuilder &MIB) {

348 MachineIRBuilder(*MIB.getInstr())

349 .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})

350 .addImm(-1);

351 MIB.addReg(ShAmtReg);

352 }}};

353 }

354 }

355

356 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};

357}

358

359InstructionSelector::ComplexRendererFns

360RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,

361 unsigned Bits) const {

362 if (!Root.isReg())

363 return std::nullopt;

365 MachineInstr *RootDef = MRI->getVRegDef(RootReg);

366

367 if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&

369 return {

370 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};

371 }

372

373 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();

374 if ((Size - VT->computeNumSignBits(RootReg)) < Bits)

375 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

376

377 return std::nullopt;

378}

379

380InstructionSelector::ComplexRendererFns

381RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,

382 unsigned Bits) const {

383 if (!Root.isReg())

384 return std::nullopt;

386

390 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

391 }

392

394 MRI->getType(RegX).getScalarSizeInBits() == Bits)

395 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

396

397 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();

399 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

400

401 return std::nullopt;

402}

403

404InstructionSelector::ComplexRendererFns

405RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,

406 unsigned ShAmt) const {

407 using namespace llvm::MIPatternMatch;

408

409 if (!Root.isReg())

410 return std::nullopt;

412

413 const unsigned XLen = STI.getXLen();

414 APInt Mask, C2;

417

421

425

427 if (*LeftShift)

429 else

431

432 if (Mask.isShiftedMask()) {

433 unsigned Leading = XLen - Mask.getActiveBits();

434 unsigned Trailing = Mask.countr_zero();

435

436

437 if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {

438 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);

439 return {{[=](MachineInstrBuilder &MIB) {

440 MachineIRBuilder(*MIB.getInstr())

441 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})

443 MIB.addReg(DstReg);

444 }}};

445 }

446

447

448

449 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {

450 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);

451 return {{[=](MachineInstrBuilder &MIB) {

452 MachineIRBuilder(*MIB.getInstr())

453 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})

454 .addImm(Leading + Trailing);

455 MIB.addReg(DstReg);

456 }}};

457 }

458 }

459 }

460

462

463

468

473

474 if (LeftShift.has_value() && Mask.isShiftedMask()) {

475 unsigned Leading = XLen - Mask.getActiveBits();

476 unsigned Trailing = Mask.countr_zero();

477

478

479

480 bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&

483

484

487

489 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);

490 return {{[=](MachineInstrBuilder &MIB) {

491 MachineIRBuilder(*MIB.getInstr())

492 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})

493 .addImm(Trailing);

494 MIB.addReg(DstReg);

495 }}};

496 }

497 }

498

499 return std::nullopt;

500}

501

502InstructionSelector::ComplexRendererFns

503RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,

504 unsigned ShAmt) const {

505 using namespace llvm::MIPatternMatch;

506

507 if (!Root.isReg())

508 return std::nullopt;

510

511

512

513

514 APInt Mask, C2;

517 RootReg, *MRI,

521

522 if (Mask.isShiftedMask()) {

523 unsigned Leading = Mask.countl_zero();

524 unsigned Trailing = Mask.countr_zero();

525 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {

526 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);

527 return {{[=](MachineInstrBuilder &MIB) {

528 MachineIRBuilder(*MIB.getInstr())

529 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})

531 MIB.addReg(DstReg);

532 }}};

533 }

534 }

535 }

536

537 return std::nullopt;

538}

539

540InstructionSelector::ComplexRendererFns

541RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {

542 assert(Root.isReg() && "Expected operand to be a Register");

543 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

544

545 if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {

547 if (C->getValue().isAllOnes())

548

549

550

551 return {{[=](MachineInstrBuilder &MIB) {

553 }}};

554

556 uint64_t ZExtC = C->getZExtValue();

557 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};

558 }

559 }

560 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};

561}

562

563InstructionSelector::ComplexRendererFns

564RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {

565 if (!Root.isReg())

566 return std::nullopt;

567

568 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

569 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {

570 return {{

571 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },

572 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },

573 }};

574 }

575

576 if (isBaseWithConstantOffset(Root, *MRI)) {

579 MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());

580 MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

581

584 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)

585 return {{

586 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },

587 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },

588 }};

589

590 return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },

591 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};

592 }

593 }

594

595

596

597 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },

598 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};

599}

600

601

602

604 switch (CC) {

605 default:

607 case CmpInst::Predicate::ICMP_EQ:

609 case CmpInst::Predicate::ICMP_NE:

611 case CmpInst::Predicate::ICMP_ULT:

613 case CmpInst::Predicate::ICMP_SLT:

615 case CmpInst::Predicate::ICMP_UGE:

617 case CmpInst::Predicate::ICMP_SGE:

619 }

620}

621

625

628 LHS = CondReg;

629 RHS = RISCV::X0;

631 return;

632 }

633

634

635

636

638 switch (Pred) {

640

643 RHS = RISCV::X0;

644 return;

645 }

646 break;

648

652 LHS = RISCV::X0;

653 return;

654 }

655 break;

656 default:

657 break;

658 }

659 }

660

661 switch (Pred) {

662 default:

670

671 break;

676

677

680 break;

681 }

682

683 CC = getRISCVCCFromICmp(Pred);

684}

685

686

687

688

690 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;

691 switch (OpSize) {

692 default:

694 case 8:

695 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;

696 case 16:

697 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;

698 case 32:

699 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;

700 case 64:

701 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;

702 }

703}

704

705

706

707

709 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;

710 switch (OpSize) {

711 case 8:

712

713 return IsStore ? RISCV::SB : RISCV::LBU;

714 case 16:

715 return IsStore ? RISCV::SH : RISCV::LH;

716 case 32:

717 return IsStore ? RISCV::SW : RISCV::LW;

718 case 64:

719 return IsStore ? RISCV::SD : RISCV::LD;

720 }

721

722 return GenericOpc;

723}

724

725void RISCVInstructionSelector::addVectorLoadStoreOperands(

726 MachineInstr &I, SmallVectorImpl &SrcOps, unsigned &CurOp,

727 bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {

728

729 auto PtrReg = I.getOperand(CurOp++).getReg();

731

732

733 if (IsStridedOrIndexed) {

734 auto StrideReg = I.getOperand(CurOp++).getReg();

736 if (IndexVT)

737 *IndexVT = MRI->getType(StrideReg);

738 }

739

740

741 if (IsMasked) {

742 auto MaskReg = I.getOperand(CurOp++).getReg();

744 }

745}

746

747bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(

748 MachineInstr &I, MachineIRBuilder &MIB) const {

749

751

752 switch (IntrinID) {

753 default:

754 return false;

755 case Intrinsic::riscv_vlm:

756 case Intrinsic::riscv_vle:

757 case Intrinsic::riscv_vle_mask:

758 case Intrinsic::riscv_vlse:

759 case Intrinsic::riscv_vlse_mask: {

760 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||

761 IntrinID == Intrinsic::riscv_vlse_mask;

762 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||

763 IntrinID == Intrinsic::riscv_vlse_mask;

764 LLT VT = MRI->getType(I.getOperand(0).getReg());

766

767

768 const Register DstReg = I.getOperand(0).getReg();

769

770

771 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;

772 unsigned CurOp = 2;

774

775

776 if (HasPassthruOperand) {

777 auto PassthruReg = I.getOperand(CurOp++).getReg();

779 } else {

781 }

782

783 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

784

786 const RISCV::VLEPseudo *P =

787 RISCV::getVLEPseudo(IsMasked, IsStrided, false, Log2SEW,

788 static_cast<unsigned>(LMUL));

789

790 auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

791

792

793 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));

794 for (auto &RenderFn : *VLOpFn)

795 RenderFn(PseudoMI);

796

797

798 PseudoMI.addImm(Log2SEW);

799

800

802 if (IsMasked)

803 Policy = I.getOperand(CurOp++).getImm();

804 PseudoMI.addImm(Policy);

805

806

807 PseudoMI.cloneMemRefs(I);

808

809 I.eraseFromParent();

811 }

812 case Intrinsic::riscv_vloxei:

813 case Intrinsic::riscv_vloxei_mask:

814 case Intrinsic::riscv_vluxei:

815 case Intrinsic::riscv_vluxei_mask: {

816 bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||

817 IntrinID == Intrinsic::riscv_vluxei_mask;

818 bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||

819 IntrinID == Intrinsic::riscv_vloxei_mask;

820 LLT VT = MRI->getType(I.getOperand(0).getReg());

822

823

824 const Register DstReg = I.getOperand(0).getReg();

825

826

827 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;

828 unsigned CurOp = 2;

830

831

832 if (HasPassthruOperand) {

833 auto PassthruReg = I.getOperand(CurOp++).getReg();

835 } else {

836

838 }

839 LLT IndexVT;

840 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);

841

846 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {

848 "values when XLEN=32");

849 }

850 const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(

851 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),

852 static_cast<unsigned>(IndexLMUL));

853

854 auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

855

856

857 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));

858 for (auto &RenderFn : *VLOpFn)

859 RenderFn(PseudoMI);

860

861

862 PseudoMI.addImm(Log2SEW);

863

864

866 if (IsMasked)

867 Policy = I.getOperand(CurOp++).getImm();

868 PseudoMI.addImm(Policy);

869

870

871 PseudoMI.cloneMemRefs(I);

872

873 I.eraseFromParent();

875 }

876 case Intrinsic::riscv_vsm:

877 case Intrinsic::riscv_vse:

878 case Intrinsic::riscv_vse_mask:

879 case Intrinsic::riscv_vsse:

880 case Intrinsic::riscv_vsse_mask: {

881 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||

882 IntrinID == Intrinsic::riscv_vsse_mask;

883 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||

884 IntrinID == Intrinsic::riscv_vsse_mask;

885 LLT VT = MRI->getType(I.getOperand(1).getReg());

887

888

889 unsigned CurOp = 1;

891

892

893 auto PassthruReg = I.getOperand(CurOp++).getReg();

895

896 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

897

899 const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(

900 IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

901

902 auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

903

904

905 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));

906 for (auto &RenderFn : *VLOpFn)

907 RenderFn(PseudoMI);

908

909

910 PseudoMI.addImm(Log2SEW);

911

912

913 PseudoMI.cloneMemRefs(I);

914

915 I.eraseFromParent();

917 }

918 case Intrinsic::riscv_vsoxei:

919 case Intrinsic::riscv_vsoxei_mask:

920 case Intrinsic::riscv_vsuxei:

921 case Intrinsic::riscv_vsuxei_mask: {

922 bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||

923 IntrinID == Intrinsic::riscv_vsuxei_mask;

924 bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||

925 IntrinID == Intrinsic::riscv_vsoxei_mask;

926 LLT VT = MRI->getType(I.getOperand(1).getReg());

928

929

930 unsigned CurOp = 1;

932

933

934 auto PassthruReg = I.getOperand(CurOp++).getReg();

936

937 LLT IndexVT;

938 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);

939

944 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {

946 "values when XLEN=32");

947 }

948 const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(

949 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),

950 static_cast<unsigned>(IndexLMUL));

951

952 auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

953

954

955 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));

956 for (auto &RenderFn : *VLOpFn)

957 RenderFn(PseudoMI);

958

959

960 PseudoMI.addImm(Log2SEW);

961

962

963 PseudoMI.cloneMemRefs(I);

964

965 I.eraseFromParent();

967 }

968 }

969}

970

971bool RISCVInstructionSelector::selectExtractSubvector(

972 MachineInstr &MI, MachineIRBuilder &MIB) const {

973 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);

974

975 Register DstReg = MI.getOperand(0).getReg();

976 Register SrcReg = MI.getOperand(1).getReg();

977

978 LLT DstTy = MRI->getType(DstReg);

979 LLT SrcTy = MRI->getType(SrcReg);

980

981 unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());

982

985

986 unsigned SubRegIdx;

987 std::tie(SubRegIdx, Idx) =

989 SrcMVT, DstMVT, Idx, &TRI);

990

991 if (Idx != 0)

992 return false;

993

995 const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);

997 return false;

998

1000 const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);

1002 return false;

1003

1004 MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {}).addReg(SrcReg, 0, SubRegIdx);

1005

1006 MI.eraseFromParent();

1007 return true;

1008}

1009

1010bool RISCVInstructionSelector::select(MachineInstr &MI) {

1011 MachineIRBuilder MIB(MI);

1012

1013 preISelLower(MI, MIB);

1014 const unsigned Opc = MI.getOpcode();

1015

1016 if (MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {

1017 if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {

1018 const Register DefReg = MI.getOperand(0).getReg();

1019 const LLT DefTy = MRI->getType(DefReg);

1020

1022 MRI->getRegClassOrRegBank(DefReg);

1023

1024 const TargetRegisterClass *DefRC =

1026 if (!DefRC) {

1028 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");

1029 return false;

1030 }

1031

1033 DefRC = getRegClassForTypeOnBank(DefTy, RB);

1034 if (!DefRC) {

1035 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");

1036 return false;

1037 }

1038 }

1039

1040 MI.setDesc(TII.get(TargetOpcode::PHI));

1042 }

1043

1044

1045 if (MI.isCopy())

1047

1048 return true;

1049 }

1050

1051 if (selectImpl(MI, *CoverageInfo))

1052 return true;

1053

1054 switch (Opc) {

1055 case TargetOpcode::G_ANYEXT:

1056 case TargetOpcode::G_PTRTOINT:

1057 case TargetOpcode::G_INTTOPTR:

1058 case TargetOpcode::G_TRUNC:

1059 case TargetOpcode::G_FREEZE:

1061 case TargetOpcode::G_CONSTANT: {

1062 Register DstReg = MI.getOperand(0).getReg();

1063 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

1064

1065 if (!materializeImm(DstReg, Imm, MIB))

1066 return false;

1067

1068 MI.eraseFromParent();

1069 return true;

1070 }

1071 case TargetOpcode::G_ZEXT:

1072 case TargetOpcode::G_SEXT: {

1073 bool IsSigned = Opc != TargetOpcode::G_ZEXT;

1074 Register DstReg = MI.getOperand(0).getReg();

1075 Register SrcReg = MI.getOperand(1).getReg();

1076 LLT SrcTy = MRI->getType(SrcReg);

1078

1080 return false;

1081

1083 RISCV::GPRBRegBankID &&

1084 "Unexpected ext regbank");

1085

1086

1087 if (IsSigned && SrcSize == 32) {

1088 MI.setDesc(TII.get(RISCV::ADDIW));

1091 }

1092

1093

1094 if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {

1095 MI.setDesc(TII.get(RISCV::ADD_UW));

1098 }

1099

1100

1101 if (SrcSize == 16 && STI.hasStdExtZbb()) {

1102 MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H

1103 : STI.isRV64() ? RISCV::ZEXT_H_RV64

1104 : RISCV::ZEXT_H_RV32));

1106 }

1107

1108

1109 if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {

1110 MI.setDesc(TII.get(STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));

1113 }

1114

1115

1116 auto ShiftLeft =

1117 MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})

1118 .addImm(STI.getXLen() - SrcSize);

1120 auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,

1121 {DstReg}, {ShiftLeft})

1122 .addImm(STI.getXLen() - SrcSize);

1124 MI.eraseFromParent();

1125 return true;

1126 }

1127 case TargetOpcode::G_FCONSTANT: {

1128

1129 Register DstReg = MI.getOperand(0).getReg();

1130 const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();

1131 unsigned Size = MRI->getType(DstReg).getSizeInBits();

1135 GPRReg = RISCV::X0;

1136 } else {

1137 GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);

1139 if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))

1140 return false;

1141 }

1142

1143 unsigned Opcode = Size == 64 ? RISCV::FMV_D_X

1144 : Size == 32 ? RISCV::FMV_W_X

1145 : RISCV::FMV_H_X;

1146 auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});

1147 if (!FMV.constrainAllUses(TII, TRI, RBI))

1148 return false;

1149 } else {

1150

1152 "Unexpected size or subtarget");

1153

1155

1156 MachineInstrBuilder FCVT =

1160 return false;

1161

1162 MI.eraseFromParent();

1163 return true;

1164 }

1165

1166

1167 Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);

1168 Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);

1170 if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),

1171 MIB))

1172 return false;

1173 if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))

1174 return false;

1175 MachineInstrBuilder PairF64 = MIB.buildInstr(

1176 RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});

1178 return false;

1179 }

1180

1181 MI.eraseFromParent();

1182 return true;

1183 }

1184 case TargetOpcode::G_GLOBAL_VALUE: {

1185 auto *GV = MI.getOperand(1).getGlobal();

1186 if (GV->isThreadLocal()) {

1187

1188 return false;

1189 }

1190

1191 return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());

1192 }

1193 case TargetOpcode::G_JUMP_TABLE:

1194 case TargetOpcode::G_CONSTANT_POOL:

1195 return selectAddr(MI, MIB, MRI);

1196 case TargetOpcode::G_BRCOND: {

1200

1202 .addMBB(MI.getOperand(1).getMBB());

1203 MI.eraseFromParent();

1205 }

1206 case TargetOpcode::G_BRINDIRECT:

1207 MI.setDesc(TII.get(RISCV::PseudoBRIND));

1210 case TargetOpcode::G_SELECT:

1211 return selectSelect(MI, MIB);

1212 case TargetOpcode::G_FCMP:

1213 return selectFPCompare(MI, MIB);

1214 case TargetOpcode::G_FENCE: {

1219 emitFence(FenceOrdering, FenceSSID, MIB);

1220 MI.eraseFromParent();

1221 return true;

1222 }

1223 case TargetOpcode::G_IMPLICIT_DEF:

1224 return selectImplicitDef(MI, MIB);

1225 case TargetOpcode::G_UNMERGE_VALUES:

1227 case TargetOpcode::G_LOAD:

1228 case TargetOpcode::G_STORE: {

1232 LLT PtrTy = MRI->getType(PtrReg);

1233

1234 const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);

1235 if (RB.getID() != RISCV::GPRBRegBankID)

1236 return false;

1237

1238#ifndef NDEBUG

1239 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);

1240

1241 assert(PtrRB.getID() == RISCV::GPRBRegBankID &&

1242 "Load/Store pointer operand isn't a GPR");

1243 assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");

1244#endif

1245

1246

1248 return false;

1249

1252

1256 }

1257

1259 if (NewOpc == MI.getOpcode())

1260 return false;

1261

1262

1263 auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));

1264 if (!AddrModeFns)

1265 return false;

1266

1267

1268 auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());

1270 NewInst.addUse(ValReg);

1271 else

1272 NewInst.addDef(ValReg);

1274 for (auto &Fn : *AddrModeFns)

1275 Fn(NewInst);

1276 MI.eraseFromParent();

1277

1279 }

1280 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:

1281 return selectIntrinsicWithSideEffects(MI, MIB);

1282 case TargetOpcode::G_EXTRACT_SUBVECTOR:

1283 return selectExtractSubvector(MI, MIB);

1284 default:

1285 return false;

1286 }

1287}

1288

1289bool RISCVInstructionSelector::selectUnmergeValues(

1290 MachineInstr &MI, MachineIRBuilder &MIB) const {

1291 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

1292

1293 if (!Subtarget->hasStdExtZfa())

1294 return false;

1295

1296

1297 if (MI.getNumOperands() != 3)

1298 return false;

1299 Register Src = MI.getOperand(2).getReg();

1302 if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))

1303 return false;

1304

1305 MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});

1307 return false;

1308

1309 MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});

1311 return false;

1312

1313 MI.eraseFromParent();

1314 return true;

1315}

1316

1317bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,

1318 MachineIRBuilder &MIB) {

1320 assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

1321

1323 auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);

1324 MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));

1325 Op.setReg(PtrToInt.getReg(0));

1326 return select(*PtrToInt);

1327}

1328

1329void RISCVInstructionSelector::preISelLower(MachineInstr &MI,

1330 MachineIRBuilder &MIB) {

1331 switch (MI.getOpcode()) {

1332 case TargetOpcode::G_PTR_ADD: {

1333 Register DstReg = MI.getOperand(0).getReg();

1335

1336 replacePtrWithInt(MI.getOperand(1), MIB);

1337 MI.setDesc(TII.get(TargetOpcode::G_ADD));

1338 MRI->setType(DstReg, sXLen);

1339 break;

1340 }

1341 case TargetOpcode::G_PTRMASK: {

1342 Register DstReg = MI.getOperand(0).getReg();

1344 replacePtrWithInt(MI.getOperand(1), MIB);

1345 MI.setDesc(TII.get(TargetOpcode::G_AND));

1346 MRI->setType(DstReg, sXLen);

1347 break;

1348 }

1349 }

1350}

1351

1352void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,

1353 const MachineInstr &MI,

1354 int OpIdx) const {

1355 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&

1356 "Expected G_CONSTANT");

1357 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();

1358 MIB.addImm(-CstVal);

1359}

1360

1361void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,

1362 const MachineInstr &MI,

1363 int OpIdx) const {

1364 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&

1365 "Expected G_CONSTANT");

1366 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();

1368}

1369

1370void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,

1371 const MachineInstr &MI,

1372 int OpIdx) const {

1373 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&

1374 "Expected G_CONSTANT");

1375 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();

1376 MIB.addImm(32 - CstVal);

1377}

1378

1379void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,

1380 const MachineInstr &MI,

1381 int OpIdx) const {

1382 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&

1383 "Expected G_CONSTANT");

1384 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();

1385 MIB.addImm(CstVal + 1);

1386}

1387

1388void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,

1389 const MachineInstr &MI,

1390 int OpIdx) const {

1391 assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&

1392 "Expected G_FRAME_INDEX");

1393 MIB.add(MI.getOperand(1));

1394}

1395

1396void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,

1397 const MachineInstr &MI,

1398 int OpIdx) const {

1399 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&

1400 "Expected G_CONSTANT");

1401 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();

1403}

1404

1405void RISCVInstructionSelector::renderXLenSubTrailingOnes(

1406 MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {

1407 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&

1408 "Expected G_CONSTANT");

1409 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();

1411}

1412

1413void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,

1414 const MachineInstr &MI,

1415 int OpIdx) const {

1416 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&

1417 "Expected G_CONSTANT");

1418 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

1419 int64_t Adj = Imm < 0 ? -2048 : 2047;

1420 MIB.addImm(Imm - Adj);

1421}

1422

1423void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,

1424 const MachineInstr &MI,

1425 int OpIdx) const {

1426 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&

1427 "Expected G_CONSTANT");

1428 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;

1430}

1431

1432const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(

1433 LLT Ty, const RegisterBank &RB) const {

1434 if (RB.getID() == RISCV::GPRBRegBankID) {

1436 return &RISCV::GPRRegClass;

1437 }

1438

1439 if (RB.getID() == RISCV::FPRBRegBankID) {

1441 return &RISCV::FPR16RegClass;

1443 return &RISCV::FPR32RegClass;

1445 return &RISCV::FPR64RegClass;

1446 }

1447

1448 if (RB.getID() == RISCV::VRBRegBankID) {

1450 return &RISCV::VRRegClass;

1451

1453 return &RISCV::VRM2RegClass;

1454

1456 return &RISCV::VRM4RegClass;

1457

1459 return &RISCV::VRM8RegClass;

1460 }

1461

1462 return nullptr;

1463}

1464

1465bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {

1467}

1468

1469bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {

1471}

1472

// Select a generic COPY whose destination is a virtual register: pick a
// concrete register class from the vreg's LLT / register bank, constrain it,
// then retarget the instruction to RISCV::COPY.
//
// NOTE(review): this listing is an HTML extraction; the original file's line
// numbers are fused onto the code and several lines were dropped — the guard
// condition before the early "return true" (presumably a physical-register
// check -- confirm against upstream), the second argument line of
// getRegClassForTypeOnBank, the assert opener for the message on line 1482,
// and the opening of the constrain-failure LLVM_DEBUG block whose tail
// survives on line 1489. All surviving tokens are preserved exactly.
1473bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
1474 Register DstReg = MI.getOperand(0).getReg();
1475
// (early-out whose condition line was lost in extraction)
1477 return true;
1478
1479 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
// (assert message fragment; its assert(...) opener was lost)
1482 "Register class not available for LLT, register bank combination");
1483
1484
1485
1486
// (tail of a lost LLVM_DEBUG failure message)
1489 << " operand\n");
1490 return false;
1491 }
1492
// Destination class is fixed; a plain COPY now suffices.
1493 MI.setDesc(TII.get(RISCV::COPY));
1494 return true;
1495}

1496

// Select G_IMPLICIT_DEF: constrain the destination vreg to a concrete class
// for its LLT / register-bank pair, then retarget to IMPLICIT_DEF.
//
// NOTE(review): HTML extraction fused original line numbers onto the code
// and dropped lines here — the second argument of getRegClassForTypeOnBank,
// the assert opener for the message on line 1506, and the opening of the
// constrain-failure LLVM_DEBUG block whose tail survives on line 1510.
// Surviving tokens are preserved exactly.
1497bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
1498 MachineIRBuilder &MIB) const {
1499 assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
1500
1501 const Register DstReg = MI.getOperand(0).getReg();
1502 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1504
// (assert message fragment; its assert(...) opener was lost)
1506 "Register class not available for LLT, register bank combination");
1507
// (tail of a lost LLVM_DEBUG failure message)
1510 << " operand\n");
1511 }
1512 MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
1513 return true;
1514}

1515

1516bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,

1517 MachineIRBuilder &MIB) const {

1518 if (Imm == 0) {

1521 return true;

1522 }

1523

1525 unsigned NumInsts = Seq.size();

1526 Register SrcReg = RISCV::X0;

1527

1528 for (unsigned i = 0; i < NumInsts; i++) {

1529 Register TmpReg = i < NumInsts - 1

1530 ? MRI->createVirtualRegister(&RISCV::GPRRegClass)

1531 : DstReg;

1532 const RISCVMatInt::Inst &I = Seq[i];

1533 MachineInstr *Result;

1534

1535 switch (I.getOpndKind()) {

1537

1540

1541 break;

1544 {SrcReg, Register(RISCV::X0)});

1545 break;

1547 Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});

1548 break;

1551 MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());

1552 break;

1553 }

1554

1556 return false;

1557

1558 SrcReg = TmpReg;

1559 }

1560

1561 return true;

1562}

1563

// Materialize the address of a global / jump-table / constant-pool symbol
// (operand 1 holds the displacement) into the destination register, choosing
// between PseudoLLA (pc-relative), PseudoLGA (GOT-indirect) and a LUI+ADDI
// absolute pair depending on locality, extern-weak-ness, and code model.
//
// NOTE(review): HTML extraction fused original line numbers onto the code
// and dropped many lines: the guard opening the first (PIC/tagged-globals?)
// region around 1581, the constrain/return after the first PseudoLLA
// retarget (1587), the MachineMemOperand construction for the GOT loads
// (1595-1599, 1649-1653), the addMemOperand and constrain lines (1603-1605,
// 1626, 1632, 1657-1659), the switch opener (presumably over
// TM.getCodeModel() -- confirm) at 1612 and its case labels (1618, 1638),
// the reportGISelFailure opener (1614), the addDisp hi/lo continuation
// lines (1624, 1630), and the constrain/return after the final PseudoLLA
// retarget (1670). Surviving tokens are preserved exactly.
1564bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
1565 MachineIRBuilder &MIB, bool IsLocal,
1566 bool IsExternWeak) const {
1567 assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1568 MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
1569 MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
1570 "Unexpected opcode");
1571
1572 const MachineOperand &DispMO = MI.getOperand(1);
1573
1574 Register DefReg = MI.getOperand(0).getReg();
1575 const LLT DefTy = MRI->getType(DefReg);
1576
1577
1578
1579
1580
// Local, untagged symbols use plain pc-relative addressing.
1582 if (IsLocal && !Subtarget->allowTaggedGlobals()) {
1583
1584
1585
1586 MI.setDesc(TII.get(RISCV::PseudoLLA));
1588 }
1589
1590
1591
1592
1593
// Non-local path: load the address from the GOT via PseudoLGA.
1594 MachineFunction &MF = *MI.getParent()->getParent();
1600
1601 auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1602 .addDisp(DispMO, 0)
1604
1606 return false;
1607
1608 MI.eraseFromParent();
1609 return true;
1610 }
1611
// (code-model switch; the switch statement itself was lost in extraction)
1613 default: {
1615 "Unsupported code model for lowering", MI);
1616 return false;
1617 }
// (case label lost) Absolute LUI %hi + ADDI %lo pair.
1619
1620
1621
1622 Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1623 MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
1625
1627 return false;
1628
1629 auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
1631
1633 return false;
1634
1635 MI.eraseFromParent();
1636 return true;
1637 }
// (case label lost)
1639
1640
1641
1642
// Extern-weak symbols go through the GOT (presumably because an undefined
// weak symbol resolves to 0, out of pc-relative range -- confirm upstream).
1643 if (IsExternWeak) {
1644
1645
1646
1647
1648 MachineFunction &MF = *MI.getParent()->getParent();
1654
1655 auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1656 .addDisp(DispMO, 0)
1658
1660 return false;
1661
1662 MI.eraseFromParent();
1663 return true;
1664 }
1665
1666
1667
1668
// Otherwise pc-relative addressing works: retarget to PseudoLLA.
1669 MI.setDesc(TII.get(RISCV::PseudoLLA));
1671 }
1672
1673 return false;
1674}

1675

// Select G_SELECT into a RISCV Select_*_Using_CC_GPR pseudo, with the
// condition decomposed into a (LHS, RHS, CC) branch-style triple.
//
// NOTE(review): HTML extraction fused original line numbers onto the code
// and dropped lines: the bindings of SelectMI / CC / LHS / RHS around
// 1678-1682 (presumably a GSelect cast plus getOperandsForBranch, which this
// listing references elsewhere -- confirm), the register-bank test opening
// the FPR block before line 1688, the head of the buildInstr operand chain
// before 1698, and the final constrain/return on 1701. Surviving tokens are
// preserved exactly.
1676bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
1677 MachineIRBuilder &MIB) const {
1679
1683
1684 Register DstReg = SelectMI.getReg(0);
1685
// GPR select pseudo by default; FPR destinations use the FPR32/64 variants.
1686 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1688 unsigned Size = MRI->getType(DstReg).getSizeInBits();
1689 Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1690 : RISCV::Select_FPR64_Using_CC_GPR;
1691 }
1692
// (head of the pseudo's operand chain lost in extraction)
1698 .addReg(SelectMI.getTrueReg())
1699 .addReg(SelectMI.getFalseReg());
1700 MI.eraseFromParent();
1702}

1703

1704

1706 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");

1707 switch (Pred) {

1708 default:

1711 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;

1713 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;

1715 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;

1716 }

1717}

1718

1719

1720

1726 };

1727

1728 assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

1729

1731 if (isLegalFCmpPredicate(InvPred)) {

1732 Pred = InvPred;

1734 return true;

1735 }

1736

1738 NeedInvert = true;

1739 if (isLegalFCmpPredicate(InvPred)) {

1740 Pred = InvPred;

1741 return true;

1742 }

1744 if (isLegalFCmpPredicate(InvPred)) {

1745 Pred = InvPred;

1747 return true;

1748 }

1749

1750 return false;

1751}

1752

1753

1754

1755

// Select G_FCMP. Directly-legal predicates become one FLT/FLE/FEQ; others
// are handled by swapping/inverting (legalizeFCmpPredicate), or by OR-ing /
// AND-ing two compares, with a final XORI #1 when the 0/1 result must be
// inverted.
//
// NOTE(review): HTML extraction fused original line numbers onto the code
// and dropped many lines: the cast / LHS / RHS / Pred bindings (1758-1763),
// the legalization call around 1768, the conditions opening each branch
// (1772, 1780-1781, 1785, 1798-1799, 1803), and the else-branch body at
// 1814. Also, several surviving constrainAllUses checks (1776, 1793, 1811,
// 1819) appear to have lost a leading '!' that their siblings (1783, 1787,
// 1801, 1805) retain -- confirm against upstream; tokens are nonetheless
// preserved exactly as extracted.
1756bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
1757 MachineIRBuilder &MIB) const {
1760
1761 Register DstReg = CmpMI.getReg(0);
1764
1765 unsigned Size = MRI->getType(LHS).getSizeInBits();
1766 assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
1767
1769
1770 bool NeedInvert = false;
1771
// Single-compare path (its opening condition was lost in extraction).
1773 if (NeedInvert)
1774 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1776 if (Cmp.constrainAllUses(TII, TRI, RBI))
1777 return false;
1779
// Two compares OR-ed together (condition lost in extraction).
1782 {&RISCV::GPRRegClass}, {LHS, RHS});
1783 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1784 return false;
1786 {&RISCV::GPRRegClass}, {RHS, LHS});
1787 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1788 return false;
1789 if (NeedInvert)
1790 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1791 auto Or =
1792 MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1793 if (Or.constrainAllUses(TII, TRI, RBI))
1794 return false;
1796
1797
// Two self-compares AND-ed together (condition lost in extraction).
1800 {&RISCV::GPRRegClass}, {LHS, LHS});
1801 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1802 return false;
1804 {&RISCV::GPRRegClass}, {RHS, RHS});
1805 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1806 return false;
1807 if (NeedInvert)
1808 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1809 auto And =
1810 MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1811 if (And.constrainAllUses(TII, TRI, RBI))
1812 return false;
1813 } else
// (else-branch body lost in extraction)
1815
1816
// Invert the 0/1 result when the predicate was legalized by inversion.
1817 if (NeedInvert) {
1818 auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
1819 if (Xor.constrainAllUses(TII, TRI, RBI))
1820 return false;
1821 }
1822
1823 MI.eraseFromParent();
1824 return true;
1825}

1826

// Emit the fence instruction (or a mere compiler barrier) required for the
// given atomic ordering.
//
// NOTE(review): HTML extraction fused original line numbers onto the code
// and dropped lines: the second parameter on line 1828 (presumably a
// SyncScope::ID, given the barrier-only path before line 1851 -- confirm),
// the second conjunct of the Ztso seq_cst test (1834-1835), the FENCE
// immediates on 1837-1838, the default-case llvm_unreachable around 1860,
// and the Pred/Succ bit assignments inside the switch (1867-1868,
// 1872-1873, 1877-1878). Surviving tokens are preserved exactly.
1827void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
1829 MachineIRBuilder &MIB) const {
1830 if (STI.hasStdExtZtso()) {
1831
1832
// Under Ztso, only a seq_cst fence (with the lost second condition, likely
// system scope) needs a real FENCE instruction.
1833 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
1835
1836 MIB.buildInstr(RISCV::FENCE, {}, {})
1839 return;
1840 }
1841
1842
// All other Ztso orderings need only a compiler barrier.
1843 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1844 return;
1845 }
1846
1847
1848
1849
// (guard lost; this barrier-only path presumably covers singlethread scope)
1851 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1852 return;
1853 }
1854
1855
1856
// Map the ordering to FENCE predecessor/successor operand bits.
1857 unsigned Pred, Succ;
1858 switch (FenceOrdering) {
1859 default:
1861 case AtomicOrdering::AcquireRelease:
1862
// fence acq_rel maps onto fence.tso.
1863 MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
1864 return;
1865 case AtomicOrdering::Acquire:
1866
1869 break;
1870 case AtomicOrdering::Release:
1871
1874 break;
1875 case AtomicOrdering::SequentiallyConsistent:
1876
1879 break;
1880 }
1881 MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
1882}

1883

1884namespace llvm {

1885InstructionSelector *

1889 return new RISCVInstructionSelector(TM, Subtarget, RBI);

1890}

1891}

unsigned const MachineRegisterInfo * MRI

#define GET_GLOBALISEL_PREDICATES_INIT

#define GET_GLOBALISEL_TEMPORARIES_INIT

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)

assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")

const TargetInstrInfo & TII

static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)

Provides analysis for querying information about KnownBits during GISel passes.

Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...

static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)

static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)

Contains matchers for matching SSA Machine Instructions.

This file declares the MachineIRBuilder class.

Register const TargetRegisterInfo * TRI

Promote Memory to Register

MachineInstr unsigned OpIdx

static StringRef getName(Value *V)

static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize)

Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...

Definition RISCVInstructionSelector.cpp:708

static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize)

Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...

Definition RISCVInstructionSelector.cpp:689

static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)

Definition RISCVInstructionSelector.cpp:1705

static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)

Definition RISCVInstructionSelector.cpp:1721

static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)

Definition RISCVInstructionSelector.cpp:622

const SmallVectorImpl< MachineOperand > & Cond

This file declares the targeting of the RegisterBankInfo class for RISC-V.

APInt bitcastToAPInt() const

unsigned getBitWidth() const

Return the number of bits in the APInt.

bool ult(const APInt &RHS) const

Unsigned less than comparison.

uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const

If this value is smaller than the specified limit, return it, otherwise return the limit value.

static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)

Constructs an APInt value that has a contiguous range of bits set.

BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

@ FCMP_OEQ

0 0 0 1 True if ordered and equal

@ ICMP_SLT

signed less than

@ ICMP_SLE

signed less or equal

@ FCMP_OLT

0 1 0 0 True if ordered and less than

@ ICMP_UGE

unsigned greater or equal

@ ICMP_UGT

unsigned greater than

@ ICMP_SGT

signed greater than

@ FCMP_ONE

0 1 1 0 True if ordered and operands are unequal

@ FCMP_UEQ

1 0 0 1 True if unordered or equal

@ ICMP_ULT

unsigned less than

@ FCMP_OLE

0 1 0 1 True if ordered and less than or equal

@ FCMP_ORD

0 1 1 1 True if ordered (no nans)

@ ICMP_SGE

signed greater or equal

@ ICMP_ULE

unsigned less or equal

@ FCMP_UNO

1 0 0 0 True if unordered: isnan(X) | isnan(Y)

Predicate getSwappedPredicate() const

For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.

Predicate getInversePredicate() const

For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...

int64_t getSExtValue() const

Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...

This is an important base class in LLVM.

virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)

Setup per-MF executor state.

Register getPointerReg() const

Get the source register of the pointer value.

MachineMemOperand & getMMO() const

Get the MachineMemOperand on this instruction.

LocationSize getMemSizeInBits() const

Returns the size in bits of the memory access.

Register getReg(unsigned Idx) const

Access the Idx'th operand as a register and return it.

constexpr unsigned getScalarSizeInBits() const

static constexpr LLT scalar(unsigned SizeInBits)

Get a low-level scalar or aggregate "bag of bits".

constexpr bool isValid() const

constexpr bool isVector() const

constexpr TypeSize getSizeInBits() const

Returns the total size of the type. Must only be called on sized types.

constexpr bool isPointer() const

constexpr unsigned getAddressSpace() const

TypeSize getValue() const

const MCInstrDesc & get(unsigned Opcode) const

Return the machine instruction descriptor that corresponds to the specified instruction opcode.

StringRef getName(unsigned Opcode) const

Returns the name for the instructions with the given opcode.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

Helper class to build MachineInstr.

MachineInstrBuilder buildInstr(unsigned Opcode)

Build and insert = Opcode .

MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)

Build and insert Res = COPY Op.

MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)

Build and insert a G_PTRTOINT instruction.

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & add(const MachineOperand &MO) const

const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const

Add a new virtual register operand.

bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const

const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const

const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register use operand.

const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const

const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register definition operand.

Representation of each machine instruction.

unsigned getOpcode() const

Returns the opcode of this MachineInstr.

unsigned getOperandNo(const_mop_iterator I) const

Returns the number of the operand iterator I points to.

const MachineOperand & getOperand(unsigned i) const

@ MODereferenceable

The memory access is dereferenceable (i.e., doesn't trap).

@ MOLoad

The memory access reads data.

@ MOInvariant

The memory access always returns the same value (or traps).

AtomicOrdering getSuccessOrdering() const

Return the atomic ordering requirements for this memory operation.

MachineOperand class - Representation of each machine instruction operand.

const ConstantInt * getCImm() const

bool isReg() const

isReg - Tests if this is a MO_Register operand.

static MachineOperand CreateImm(int64_t Val)

Register getReg() const

getReg - Returns the register number.

static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

Analysis providing profile information.

This class provides the information for the target register banks.

static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)

static unsigned getRegClassIDForVecVT(MVT VT)

static RISCVVType::VLMUL getLMUL(MVT VT)

static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)

Constrain the (possibly generic) virtual register Reg to RC.

const RegisterBank & getRegBank(unsigned ID)

Get the register bank identified by ID.

This class implements the register bank concept.

unsigned getID() const

Get the identifier of this register bank.

Wrapper class representing virtual and physical registers.

constexpr bool isPhysical() const

Return true if the specified register number is in the physical register namespace.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

bool isPositionIndependent() const

CodeModel::Model getCodeModel() const

Returns the code model.

constexpr ScalarTy getKnownMinValue() const

Returns the minimum value this quantity can represent.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Align[]

Key for Kernel::Arg::Metadata::mAlign.

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

@ C

The default llvm calling convention, compatible with C.

operand_type_match m_Reg()

SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue)

Matches a constant equal to RequestedValue.

operand_type_match m_Pred()

UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)

ConstantMatch< APInt > m_ICst(APInt &Cst)

BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)

OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)

CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)

bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)

BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)

BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)

unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)

InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)

SmallVector< Inst, 8 > InstSeq

static constexpr int64_t VLMaxSentinel

@ SingleThread

Synchronized with respect to signal handlers executing in the same thread.

@ System

Synchronized with respect to all concurrently executing threads.

This is an optimization pass for GlobalISel generic memory operations.

PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank

Convenient type to represent either a register class or a register bank.

constexpr bool isInt(int64_t x)

Checks if an integer fits into the given bit width.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

bool isStrongerThanMonotonic(AtomicOrdering AO)

int countr_one(T Value)

Count the number of ones from the least significant bit to the first zero bit.

LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)

Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...

int bit_width(T Value)

Returns the number of bits needed to represent Value if Value is nonzero.

LLVM_ABI MVT getMVTForLLT(LLT Ty)

Get a rough equivalent of an MVT for a given LLT.

InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)

Definition RISCVInstructionSelector.cpp:1886

LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)

If VReg is defined by a G_CONSTANT fits in int64_t returns it.

int countr_zero(T Val)

Count number of 0's from the least significant bit to the most stopping at the first 1.

unsigned Log2_32(uint32_t Value)

Return the floor log base 2 of the specified value, -1 if the value is zero.

MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)

LLVM_ABI raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)

Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.

constexpr bool isUInt(uint64_t x)

Checks if an unsigned integer fits into the given bit width.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

AtomicOrdering

Atomic ordering for LLVM's memory model.

constexpr T maskTrailingZeros(unsigned N)

Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.

@ Or

Bitwise or logical OR of integers.

@ Xor

Bitwise or logical XOR of integers.

@ And

Bitwise or logical AND of integers.

DWARFExpression::Operation Op

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

constexpr T maskTrailingOnes(unsigned N)

Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.

LLVM_ABI void reportFatalUsageError(Error Err)

Report a fatal error that does not indicate a bug in LLVM.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)

Return a MachinePointerInfo record that refers to a GOT entry.