LLVM: lib/Target/RISCV/RISCVInstrInfo.cpp Source File


39using namespace llvm;

40

41#define GEN_CHECK_COMPRESS_INSTR

42#include "RISCVGenCompressInstEmitter.inc"

43

44#define GET_INSTRINFO_CTOR_DTOR

45#define GET_INSTRINFO_NAMED_OPS

46#include "RISCVGenInstrInfo.inc"

47

48#define DEBUG_TYPE "riscv-instr-info"

50 "Number of registers within vector register groups spilled");

52 "Number of registers within vector register groups reloaded");

53

56 cl::desc("Prefer whole register move for vector registers."));

57

59 "riscv-force-machine-combiner-strategy", cl::Hidden,

60 cl::desc("Force machine combiner to use a specific strategy for machine "

61 "trace metrics evaluation."),

64 "Local strategy."),

66 "MinInstrCount strategy.")));

67

69

70using namespace RISCV;

71

72#define GET_RISCVVPseudosTable_IMPL

73#include "RISCVGenSearchableTables.inc"

74

75}

76

78

79#define GET_RISCVMaskedPseudosTable_IMPL

80#include "RISCVGenSearchableTables.inc"

81

82}

83

88

89#define GET_INSTRINFO_HELPERS

90#include "RISCVGenInstrInfo.inc"

91

93 if (STI.hasStdExtZca())

99}

100

102 int &FrameIndex) const {

105}

106

108 switch (Opcode) {

109 default:

110 return std::nullopt;

111 case RISCV::VS1R_V:

112 case RISCV::VL1RE8_V:

113 case RISCV::VL1RE16_V:

114 case RISCV::VL1RE32_V:

115 case RISCV::VL1RE64_V:

116 return 1;

117 case RISCV::VS2R_V:

118 case RISCV::VL2RE8_V:

119 case RISCV::VL2RE16_V:

120 case RISCV::VL2RE32_V:

121 case RISCV::VL2RE64_V:

122 return 2;

123 case RISCV::VS4R_V:

124 case RISCV::VL4RE8_V:

125 case RISCV::VL4RE16_V:

126 case RISCV::VL4RE32_V:

127 case RISCV::VL4RE64_V:

128 return 4;

129 case RISCV::VS8R_V:

130 case RISCV::VL8RE8_V:

131 case RISCV::VL8RE16_V:

132 case RISCV::VL8RE32_V:

133 case RISCV::VL8RE64_V:

134 return 8;

135 }

136}
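// The switch above maps the whole-vector-register move opcodes to the number
// of architectural vector registers they touch: VS1R_V / VL1RE{8,16,32,64}_V
// cover one register, up to VS8R_V / VL8RE*_V covering a group of eight.
// Illustrative use only (helper name hypothetical, sizing assumes
// RISCV::RVVBitsPerBlock bits per vector register):
//
//   if (std::optional<unsigned> NumRegs = getNumRegsForWholeMove(Opcode))
//     SpillSize = TypeSize::getScalable(*NumRegs * (RISCV::RVVBitsPerBlock / 8));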

137

139 int &FrameIndex,

141 switch (MI.getOpcode()) {

142 default:

143 return 0;

144 case RISCV::LB:

145 case RISCV::LBU:

147 break;

148 case RISCV::LH:

149 case RISCV::LH_INX:

150 case RISCV::LHU:

151 case RISCV::FLH:

153 break;

154 case RISCV::LW:

155 case RISCV::LW_INX:

156 case RISCV::FLW:

157 case RISCV::LWU:

159 break;

160 case RISCV::LD:

161 case RISCV::LD_RV32:

162 case RISCV::FLD:

164 break;

165 case RISCV::VL1RE8_V:

166 case RISCV::VL2RE8_V:

167 case RISCV::VL4RE8_V:

168 case RISCV::VL8RE8_V:

169 if (MI.getOperand(1).isFI())

171 FrameIndex = MI.getOperand(1).getIndex();

174 return MI.getOperand(0).getReg();

175 }

176

177 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&

178 MI.getOperand(2).getImm() == 0) {

179 FrameIndex = MI.getOperand(1).getIndex();

180 return MI.getOperand(0).getReg();

181 }

182

183 return 0;

184}

185

187 int &FrameIndex) const {

190}

191

193 int &FrameIndex,

195 switch (MI.getOpcode()) {

196 default:

197 return 0;

198 case RISCV::SB:

200 break;

201 case RISCV::SH:

202 case RISCV::SH_INX:

203 case RISCV::FSH:

205 break;

206 case RISCV::SW:

207 case RISCV::SW_INX:

208 case RISCV::FSW:

210 break;

211 case RISCV::SD:

212 case RISCV::SD_RV32:

213 case RISCV::FSD:

215 break;

216 case RISCV::VS1R_V:

217 case RISCV::VS2R_V:

218 case RISCV::VS4R_V:

219 case RISCV::VS8R_V:

220 if (MI.getOperand(1).isFI())

222 FrameIndex = MI.getOperand(1).getIndex();

225 return MI.getOperand(0).getReg();

226 }

227

228 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&

229 MI.getOperand(2).getImm() == 0) {

230 FrameIndex = MI.getOperand(1).getIndex();

231 return MI.getOperand(0).getReg();

232 }

233

234 return 0;

235}

236

240 case RISCV::VMV_V_X:

241 case RISCV::VFMV_V_F:

242 case RISCV::VMV_V_I:

243 case RISCV::VMV_S_X:

244 case RISCV::VFMV_S_F:

245 case RISCV::VID_V:

246 return MI.getOperand(1).isUndef();

247 default:

249 }

250}

251

253 unsigned NumRegs) {

254 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;

255}
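// The helper above returns true when the destination group overlaps a later
// element of the source group, i.e. DstReg lies in (SrcReg, SrcReg + NumRegs).
// Illustrative example: copying a 4-register tuple starting at v8 to a tuple
// starting at v10 overlaps (v10 and v11 are both sources and destinations), so
// the per-register copy has to be emitted in reverse order to avoid clobbering
// source registers before they are read.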

256

263 return false;

264

265 assert(MBBI->getOpcode() == TargetOpcode::COPY &&

266 "Unexpected COPY instruction.");

267 Register SrcReg = MBBI->getOperand(1).getReg();

269

270 bool FoundDef = false;

271 bool FirstVSetVLI = false;

272 unsigned FirstSEW = 0;

273 while (MBBI != MBB.begin()) {

275 if (MBBI->isMetaInstruction())

276 continue;

277

278 if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {

279

280

281

282

283

284

285 if (!FoundDef) {

286 if (!FirstVSetVLI) {

287 FirstVSetVLI = true;

288 unsigned FirstVType = MBBI->getOperand(2).getImm();

291

292

293 if (FirstLMul != LMul)

294 return false;

295 }

296

297

298 if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))

299 return false;

300 continue;

301 }

302

303

304 unsigned VType = MBBI->getOperand(2).getImm();

305

306 if (FirstVSetVLI) {

307

309 return false;

310 }

311

312

314 return false;

315

316

317

318

319

320

322 } else if (MBBI->isInlineAsm() || MBBI->isCall()) {

323 return false;

324 } else if (MBBI->getNumDefs()) {

325

326

327 if (MBBI->modifiesRegister(RISCV::VL, nullptr))

328 return false;

329

330

331

333 if (!MO.isReg() || !MO.isDef())

334 continue;

335 if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {

336

337

338

339

340

341

342

343

344

345

346

347

348

349

350 if (MO.getReg() != SrcReg)

351 return false;

352

353

354

355

356

357

358

359

360

361

364 return false;

365

366

367

369 return false;

370

371

372 FoundDef = true;

373 DefMBBI = MBBI;

374 break;

375 }

376 }

377 }

378 }

379

380 return false;

381}

382

390

391 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);

392 uint16_t DstEncoding = TRI->getEncodingValue(DstReg);

394 assert(!Fractional && "It is impossible to have a fractional LMUL here.");

395 unsigned NumRegs = NF * LMulVal;

396 bool ReversedCopy =

398 if (ReversedCopy) {

399

400

401 SrcEncoding += NumRegs - 1;

402 DstEncoding += NumRegs - 1;

403 }

404

405 unsigned I = 0;

406 auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)

408 unsigned, unsigned> {

409 if (ReversedCopy) {

410

411

412

413

414

415

416 uint16_t Diff = DstEncoding - SrcEncoding;

417 if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&

418 DstEncoding % 8 == 7)

420 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};

421 if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&

422 DstEncoding % 4 == 3)

424 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};

425 if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&

426 DstEncoding % 2 == 1)

428 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};

429

431 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};

432 }

433

434

435

436 if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)

438 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};

439 if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)

441 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};

442 if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)

444 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};

445

447 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};

448 };

449

450 while (I != NumRegs) {

451

452

453

454

455 auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =

456 GetCopyInfo(SrcEncoding, DstEncoding);

458

460 if (LMul == LMulCopied &&

462 Opc = VVOpc;

463 if (DefMBBI->getOpcode() == VIOpc)

464 Opc = VIOpc;

465 }

466

467

468

469 MCRegister ActualSrcReg = TRI->findVRegWithEncoding(

470 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);

471 MCRegister ActualDstReg = TRI->findVRegWithEncoding(

472 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);

473

477 if (UseVMV)

479 if (UseVMV_V_I)

480 MIB = MIB.add(DefMBBI->getOperand(2));

481 else

482 MIB = MIB.addReg(ActualSrcReg, getKillRegState(KillSrc));

483 if (UseVMV) {

486 unsigned Log2SEW =

488 MIB.addImm(Log2SEW ? Log2SEW : 3);

489 MIB.addImm(0);

492 }

493

494

495

496

498

499

500 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);

501 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);

502 I += NumCopied;

503 }

504}
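// Rough shape of the vector copy lowering above (illustrative, not
// exhaustive): GetCopyInfo picks the widest whole-register move whose encoding
// alignment allows it, so copying a 4-register group whose source and
// destination encodings are both 4-aligned becomes a single whole-register
// move, e.g.
//   vmv4r.v v12, v8
// while an overlapping (reversed) copy is split and walked from the highest
// encoding downwards. When the defining instruction is a vmv.v.v/vmv.v.i with
// a matching LMUL, the copy can reuse that opcode instead so VL is respected.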

505

509 Register SrcReg, bool KillSrc,

510 bool RenamableDest, bool RenamableSrc) const {

513

514 if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {

518 return;

519 }

520

521 if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {

524 return;

525 }

526

527 if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {

530 return;

531 }

532

533 if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {

534 if (STI.isRV32() && STI.hasStdExtZdinx()) {

535

536

540 return;

541 }

542

543 MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);

544 MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);

545

546 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)

547 OddReg = RISCV::X0;

548 assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");

549

550

552 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))

553 .addReg(EvenReg, KillFlag)

556 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))

557 .addReg(OddReg, KillFlag)

559 return;

560 }

561

562

563 if (RISCV::VCSRRegClass.contains(SrcReg) &&

564 RISCV::GPRRegClass.contains(DstReg)) {

566 .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)

568 return;

569 }

570

571 if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {

572 unsigned Opc;

573 if (STI.hasStdExtZfh()) {

574 Opc = RISCV::FSGNJ_H;

575 } else {

577 (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&

578 "Unexpected extensions");

579

580 DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,

581 &RISCV::FPR32RegClass);

582 SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,

583 &RISCV::FPR32RegClass);

584 Opc = RISCV::FSGNJ_S;

585 }

587 .addReg(SrcReg, KillFlag)

588 .addReg(SrcReg, KillFlag);

589 return;

590 }

591

592 if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {

594 .addReg(SrcReg, KillFlag)

595 .addReg(SrcReg, KillFlag);

596 return;

597 }

598

599 if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {

601 .addReg(SrcReg, KillFlag)

602 .addReg(SrcReg, KillFlag);

603 return;

604 }

605

606 if (RISCV::FPR32RegClass.contains(DstReg) &&

607 RISCV::GPRRegClass.contains(SrcReg)) {

609 .addReg(SrcReg, KillFlag);

610 return;

611 }

612

613 if (RISCV::GPRRegClass.contains(DstReg) &&

614 RISCV::FPR32RegClass.contains(SrcReg)) {

616 .addReg(SrcReg, KillFlag);

617 return;

618 }

619

620 if (RISCV::FPR64RegClass.contains(DstReg) &&

621 RISCV::GPRRegClass.contains(SrcReg)) {

622 assert(STI.getXLen() == 64 && "Unexpected GPR size");

624 .addReg(SrcReg, KillFlag);

625 return;

626 }

627

628 if (RISCV::GPRRegClass.contains(DstReg) &&

629 RISCV::FPR64RegClass.contains(SrcReg)) {

630 assert(STI.getXLen() == 64 && "Unexpected GPR size");

632 .addReg(SrcReg, KillFlag);

633 return;

634 }

635

636

638 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);

641 return;

642 }

643

645}
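// Scalar copy lowering above, summarized in dispatch order: plain GPR copies
// use an ADDI-with-zero form, GPR pairs copy their even/odd halves, FPR16/32/64
// copies are emitted as sign-injection moves (e.g. fsgnj.s fd, fs, fs),
// FPR<->GPR copies use the fmv.* instructions when XLEN permits, and anything
// left (the vector register classes) falls through to the vector copy helper.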

646

649 Register SrcReg, bool IsKill, int FI,

655

656 unsigned Opcode;

657 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {

658 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW

659 : RISCV::SD;

660 } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {

661 Opcode = RISCV::SH_INX;

662 } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {

663 Opcode = RISCV::SW_INX;

664 } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {

665 Opcode = RISCV::PseudoRV32ZdinxSD;

666 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {

667 Opcode = RISCV::FSH;

668 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {

669 Opcode = RISCV::FSW;

670 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {

671 Opcode = RISCV::FSD;

672 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {

673 Opcode = RISCV::VS1R_V;

674 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {

675 Opcode = RISCV::VS2R_V;

676 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {

677 Opcode = RISCV::VS4R_V;

678 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {

679 Opcode = RISCV::VS8R_V;

680 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))

681 Opcode = RISCV::PseudoVSPILL2_M1;

682 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))

683 Opcode = RISCV::PseudoVSPILL2_M2;

684 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))

685 Opcode = RISCV::PseudoVSPILL2_M4;

686 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))

687 Opcode = RISCV::PseudoVSPILL3_M1;

688 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))

689 Opcode = RISCV::PseudoVSPILL3_M2;

690 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))

691 Opcode = RISCV::PseudoVSPILL4_M1;

692 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))

693 Opcode = RISCV::PseudoVSPILL4_M2;

694 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))

695 Opcode = RISCV::PseudoVSPILL5_M1;

696 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))

697 Opcode = RISCV::PseudoVSPILL6_M1;

698 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))

699 Opcode = RISCV::PseudoVSPILL7_M1;

700 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))

701 Opcode = RISCV::PseudoVSPILL8_M1;

702 else

704

709

717 } else {

721

728 }

729}
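// Spill opcode selection above, summarized: scalar GPRs use SW/SD depending on
// XLEN, the 16/32-bit GPR views use SH_INX/SW_INX, Zdinx register pairs use
// PseudoRV32ZdinxSD, FP registers use FSH/FSW/FSD, single vector register
// groups use the whole-register stores VS1R_V..VS8R_V, and segment/tuple
// register classes use the PseudoVSPILL<N> pseudos expanded later.
// loadRegFromStackSlot below mirrors this with LW/LD, FLH/FLW/FLD,
// VL<N>RE8_V and PseudoVRELOAD<N>.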

730

741

742 unsigned Opcode;

743 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {

744 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW

745 : RISCV::LD;

746 } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {

747 Opcode = RISCV::LH_INX;

748 } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {

749 Opcode = RISCV::LW_INX;

750 } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {

751 Opcode = RISCV::PseudoRV32ZdinxLD;

752 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {

753 Opcode = RISCV::FLH;

754 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {

755 Opcode = RISCV::FLW;

756 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {

757 Opcode = RISCV::FLD;

758 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {

759 Opcode = RISCV::VL1RE8_V;

760 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {

761 Opcode = RISCV::VL2RE8_V;

762 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {

763 Opcode = RISCV::VL4RE8_V;

764 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {

765 Opcode = RISCV::VL8RE8_V;

766 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))

767 Opcode = RISCV::PseudoVRELOAD2_M1;

768 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))

769 Opcode = RISCV::PseudoVRELOAD2_M2;

770 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))

771 Opcode = RISCV::PseudoVRELOAD2_M4;

772 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))

773 Opcode = RISCV::PseudoVRELOAD3_M1;

774 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))

775 Opcode = RISCV::PseudoVRELOAD3_M2;

776 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))

777 Opcode = RISCV::PseudoVRELOAD4_M1;

778 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))

779 Opcode = RISCV::PseudoVRELOAD4_M2;

780 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))

781 Opcode = RISCV::PseudoVRELOAD5_M1;

782 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))

783 Opcode = RISCV::PseudoVRELOAD6_M1;

784 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))

785 Opcode = RISCV::PseudoVRELOAD7_M1;

786 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))

787 Opcode = RISCV::PseudoVRELOAD8_M1;

788 else

789 llvm_unreachable("Can't load this register from stack slot");

790

795

802 } else {

806

812 }

813}

817

818

819

820

822 return std::nullopt;

823

824

825 if (Ops.size() != 1 || Ops[0] != 1)

826 return std::nullopt;

827

828 switch (MI.getOpcode()) {

829 default:

830 if (RISCVInstrInfo::isSEXT_W(MI))

831 return RISCV::LW;

832 if (RISCVInstrInfo::isZEXT_W(MI))

833 return RISCV::LWU;

834 if (RISCVInstrInfo::isZEXT_B(MI))

835 return RISCV::LBU;

836 break;

837 case RISCV::SEXT_H:

838 return RISCV::LH;

839 case RISCV::SEXT_B:

840 return RISCV::LB;

841 case RISCV::ZEXT_H_RV32:

842 case RISCV::ZEXT_H_RV64:

843 return RISCV::LHU;

844 }

845

847 default:

848 return std::nullopt;

849 case RISCV::VMV_X_S: {

850 unsigned Log2SEW =

852 if (ST.getXLen() < (1U << Log2SEW))

853 return std::nullopt;

854 switch (Log2SEW) {

855 case 3:

856 return RISCV::LB;

857 case 4:

858 return RISCV::LH;

859 case 5:

860 return RISCV::LW;

861 case 6:

862 return RISCV::LD;

863 default:

865 }

866 }

867 case RISCV::VFMV_F_S: {

868 unsigned Log2SEW =

870 switch (Log2SEW) {

871 case 4:

872 return RISCV::FLH;

873 case 5:

874 return RISCV::FLW;

875 case 6:

876 return RISCV::FLD;

877 default:

879 }

880 }

881 }

882}

883

884

889

891 if (!LoadOpc)

892 return nullptr;

893 Register DstReg = MI.getOperand(0).getReg();

894 return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),

895 DstReg)

898}

899

904 bool DstIsDead) const {

906

907

909

911 report_fatal_error("Should only materialize 32-bit constants for RV32");

912

913

915 }

916

919

920 bool SrcRenamable = false;

921 unsigned Num = 0;

922

924 bool LastItem = ++Num == Seq.size();

925 unsigned DstRegState = getDeadRegState(DstIsDead && LastItem) |

927 unsigned SrcRegState = getKillRegState(SrcReg != RISCV::X0) |

929 switch (Inst.getOpndKind()) {

933 .addImm(Inst.getImm())

935 break;

939 .addReg(SrcReg, SrcRegState)

942 break;

946 .addReg(SrcReg, SrcRegState)

947 .addReg(SrcReg, SrcRegState)

949 break;

953 .addReg(SrcReg, SrcRegState)

954 .addImm(Inst.getImm())

956 break;

957 }

958

959

960 SrcReg = DstReg;

961 SrcRenamable = DstRenamable;

962 }

963}
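// movImm emits the RISCVMatInt recipe for Val as a chain of instructions,
// threading the partially materialized value through SrcReg. Illustrative
// example (RV32): materializing 0x12345678 typically produces
//   lui  a0, 0x12345
//   addi a0, a0, 0x678
// with the kill/dead and renamable flags propagated so that only the last
// instruction in the sequence may mark the destination dead.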

964

966 switch (Opc) {

967 default:

969 case RISCV::BEQ:

970 case RISCV::BEQI:

971 case RISCV::CV_BEQIMM:

972 case RISCV::QC_BEQI:

973 case RISCV::QC_E_BEQI:

974 case RISCV::NDS_BBC:

975 case RISCV::NDS_BEQC:

977 case RISCV::BNE:

978 case RISCV::BNEI:

979 case RISCV::QC_BNEI:

980 case RISCV::QC_E_BNEI:

981 case RISCV::CV_BNEIMM:

982 case RISCV::NDS_BBS:

983 case RISCV::NDS_BNEC:

985 case RISCV::BLT:

986 case RISCV::QC_BLTI:

987 case RISCV::QC_E_BLTI:

989 case RISCV::BGE:

990 case RISCV::QC_BGEI:

991 case RISCV::QC_E_BGEI:

993 case RISCV::BLTU:

994 case RISCV::QC_BLTUI:

995 case RISCV::QC_E_BLTUI:

997 case RISCV::BGEU:

998 case RISCV::QC_BGEUI:

999 case RISCV::QC_E_BGEUI:

1001 }

1002}

1003

1005 int64_t C1) {

1006 switch (CC) {

1007 default:

1010 return C0 == C1;

1012 return C0 != C1;

1014 return C0 < C1;

1016 return C0 >= C1;

1021 }

1022}

1023

1024

1025

1026

1029

1031 "Unknown conditional branch");

1036}

1037

1039 switch (Opcode) {

1040 default:

1042 case RISCV::QC_MVEQ:

1043 return RISCV::QC_MVNE;

1044 case RISCV::QC_MVNE:

1045 return RISCV::QC_MVEQ;

1046 case RISCV::QC_MVLT:

1047 return RISCV::QC_MVGE;

1048 case RISCV::QC_MVGE:

1049 return RISCV::QC_MVLT;

1050 case RISCV::QC_MVLTU:

1051 return RISCV::QC_MVGEU;

1052 case RISCV::QC_MVGEU:

1053 return RISCV::QC_MVLTU;

1054 case RISCV::QC_MVEQI:

1055 return RISCV::QC_MVNEI;

1056 case RISCV::QC_MVNEI:

1057 return RISCV::QC_MVEQI;

1058 case RISCV::QC_MVLTI:

1059 return RISCV::QC_MVGEI;

1060 case RISCV::QC_MVGEI:

1061 return RISCV::QC_MVLTI;

1062 case RISCV::QC_MVLTUI:

1063 return RISCV::QC_MVGEUI;

1064 case RISCV::QC_MVGEUI:

1065 return RISCV::QC_MVLTUI;

1066 }

1067}

1068

1070 switch (SelectOpc) {

1071 default:

1072 switch (CC) {

1073 default:

1076 return RISCV::BEQ;

1078 return RISCV::BNE;

1080 return RISCV::BLT;

1082 return RISCV::BGE;

1084 return RISCV::BLTU;

1086 return RISCV::BGEU;

1087 }

1088 break;

1089 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:

1090 switch (CC) {

1091 default:

1094 return RISCV::BEQI;

1096 return RISCV::BNEI;

1097 }

1098 break;

1099 case RISCV::Select_GPR_Using_CC_SImm5_CV:

1100 switch (CC) {

1101 default:

1104 return RISCV::CV_BEQIMM;

1106 return RISCV::CV_BNEIMM;

1107 }

1108 break;

1109 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:

1110 switch (CC) {

1111 default:

1114 return RISCV::QC_BEQI;

1116 return RISCV::QC_BNEI;

1118 return RISCV::QC_BLTI;

1120 return RISCV::QC_BGEI;

1121 }

1122 break;

1123 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:

1124 switch (CC) {

1125 default:

1128 return RISCV::QC_BLTUI;

1130 return RISCV::QC_BGEUI;

1131 }

1132 break;

1133 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:

1134 switch (CC) {

1135 default:

1138 return RISCV::QC_E_BEQI;

1140 return RISCV::QC_E_BNEI;

1142 return RISCV::QC_E_BLTI;

1144 return RISCV::QC_E_BGEI;

1145 }

1146 break;

1147 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:

1148 switch (CC) {

1149 default:

1152 return RISCV::QC_E_BLTUI;

1154 return RISCV::QC_E_BGEUI;

1155 }

1156 break;

1157 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:

1158 switch (CC) {

1159 default:

1162 return RISCV::NDS_BBC;

1164 return RISCV::NDS_BBS;

1165 }

1166 break;

1167 case RISCV::Select_GPR_Using_CC_UImm7_NDS:

1168 switch (CC) {

1169 default:

1172 return RISCV::NDS_BEQC;

1174 return RISCV::NDS_BNEC;

1175 }

1176 break;

1177 }

1178}

1179

1181 switch (CC) {

1182 default:

1196 }

1197}

1198

1203 bool AllowModify) const {

1204 TBB = FBB = nullptr;

1205 Cond.clear();

1206

1207

1209 if (I == MBB.end() || !isUnpredicatedTerminator(*I))

1210 return false;

1211

1212

1213

1215 int NumTerminators = 0;

1216 for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);

1217 J++) {

1218 NumTerminators++;

1219 if (J->getDesc().isUnconditionalBranch() ||

1220 J->getDesc().isIndirectBranch()) {

1221 FirstUncondOrIndirectBr = J.getReverse();

1222 }

1223 }

1224

1225

1226

1227 if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {

1228 while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {

1229 std::next(FirstUncondOrIndirectBr)->eraseFromParent();

1230 NumTerminators--;

1231 }

1232 I = FirstUncondOrIndirectBr;

1233 }

1234

1235

1236 if (I->getDesc().isIndirectBranch())

1237 return true;

1238

1239

1240 if (I->isPreISelOpcode())

1241 return true;

1242

1243

1244 if (NumTerminators > 2)

1245 return true;

1246

1247

1248 if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {

1250 return false;

1251 }

1252

1253

1254 if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {

1256 return false;

1257 }

1258

1259

1260 if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&

1261 I->getDesc().isUnconditionalBranch()) {

1264 return false;

1265 }

1266

1267

1268 return true;

1269}
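// analyzeBranch above follows the usual TargetInstrInfo contract: it reports
// the block as analyzable (returns false) for a fallthrough, a single
// unconditional branch, a single conditional branch, or a conditional branch
// followed by an unconditional one, and gives up (returns true) for indirect
// branches, pre-ISel terminators, or more than two terminators. With
// AllowModify set it also deletes any terminators that follow the first
// unconditional/indirect branch, since they are unreachable.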

1270

1272 int *BytesRemoved) const {

1273 if (BytesRemoved)

1274 *BytesRemoved = 0;

1276 if (I == MBB.end())

1277 return 0;

1278

1279 if (!I->getDesc().isUnconditionalBranch() &&

1280 !I->getDesc().isConditionalBranch())

1281 return 0;

1282

1283

1284 if (BytesRemoved)

1286 I->eraseFromParent();

1287

1288 I = MBB.end();

1289

1290 if (I == MBB.begin())

1291 return 1;

1292 --I;

1293 if (!I->getDesc().isConditionalBranch())

1294 return 1;

1295

1296

1297 if (BytesRemoved)

1299 I->eraseFromParent();

1300 return 2;

1301}

1302

1303

1304

1308 if (BytesAdded)

1309 *BytesAdded = 0;

1310

1311

1312 assert(TBB && "insertBranch must not be told to insert a fallthrough");

1314 "RISC-V branch conditions have two components!");

1315

1316

1317 if (Cond.empty()) {

1319 if (BytesAdded)

1321 return 1;

1322 }

1323

1324

1329 if (BytesAdded)

1331

1332

1333 if (!FBB)

1334 return 1;

1335

1336

1338 if (BytesAdded)

1340 return 2;

1341}

1342

1346 const DebugLoc &DL, int64_t BrOffset,

1348 assert(RS && "RegScavenger required for long branching");

1350 "new block should be inserted for expanding unconditional branch");

1353 "restore block should be inserted for restoring clobbered registers");

1354

1359

1362 "Branch offsets outside of the signed 32-bit range not supported");

1363

1364

1365

1366

1367 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);

1368 auto II = MBB.end();

1369

1373

1374 RS->enterBasicBlockEnd(MBB);

1376 if (STI.hasStdExtZicfilp())

1377 RC = &RISCV::GPRX7RegClass;

1379 RS->scavengeRegisterBackwards(*RC, MI.getIterator(),

1380 false, 0,

1381 false);

1383 RS->setRegUsed(TmpGPR);

1384 else {

1385

1386

1387

1388 TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;

1389

1390 if (STI.hasStdExtZicfilp())

1391 TmpGPR = RISCV::X7;

1392

1394 if (FrameIndex == -1)

1396

1398 &RISCV::GPRRegClass, Register());

1399 TRI->eliminateFrameIndex(std::prev(MI.getIterator()),

1400 0, 1);

1401

1402 MI.getOperand(1).setMBB(&RestoreBB);

1403

1405 &RISCV::GPRRegClass, Register());

1406 TRI->eliminateFrameIndex(RestoreBB.back(),

1407 0, 1);

1408 }

1409

1410 MRI.replaceRegWith(ScratchReg, TmpGPR);

1411 MRI.clearVirtRegs();

1412}

1413

1416 assert((Cond.size() == 3) && "Invalid branch condition!");

1418 default:

1420 case RISCV::BEQ:

1421 Cond[0].setImm(RISCV::BNE);

1422 break;

1423 case RISCV::BEQI:

1424 Cond[0].setImm(RISCV::BNEI);

1425 break;

1426 case RISCV::BNE:

1427 Cond[0].setImm(RISCV::BEQ);

1428 break;

1429 case RISCV::BNEI:

1430 Cond[0].setImm(RISCV::BEQI);

1431 break;

1432 case RISCV::BLT:

1433 Cond[0].setImm(RISCV::BGE);

1434 break;

1435 case RISCV::BGE:

1436 Cond[0].setImm(RISCV::BLT);

1437 break;

1438 case RISCV::BLTU:

1439 Cond[0].setImm(RISCV::BGEU);

1440 break;

1441 case RISCV::BGEU:

1442 Cond[0].setImm(RISCV::BLTU);

1443 break;

1444 case RISCV::CV_BEQIMM:

1445 Cond[0].setImm(RISCV::CV_BNEIMM);

1446 break;

1447 case RISCV::CV_BNEIMM:

1448 Cond[0].setImm(RISCV::CV_BEQIMM);

1449 break;

1450 case RISCV::QC_BEQI:

1451 Cond[0].setImm(RISCV::QC_BNEI);

1452 break;

1453 case RISCV::QC_BNEI:

1454 Cond[0].setImm(RISCV::QC_BEQI);

1455 break;

1456 case RISCV::QC_BGEI:

1457 Cond[0].setImm(RISCV::QC_BLTI);

1458 break;

1459 case RISCV::QC_BLTI:

1460 Cond[0].setImm(RISCV::QC_BGEI);

1461 break;

1462 case RISCV::QC_BGEUI:

1463 Cond[0].setImm(RISCV::QC_BLTUI);

1464 break;

1465 case RISCV::QC_BLTUI:

1466 Cond[0].setImm(RISCV::QC_BGEUI);

1467 break;

1468 case RISCV::QC_E_BEQI:

1469 Cond[0].setImm(RISCV::QC_E_BNEI);

1470 break;

1471 case RISCV::QC_E_BNEI:

1472 Cond[0].setImm(RISCV::QC_E_BEQI);

1473 break;

1474 case RISCV::QC_E_BGEI:

1475 Cond[0].setImm(RISCV::QC_E_BLTI);

1476 break;

1477 case RISCV::QC_E_BLTI:

1478 Cond[0].setImm(RISCV::QC_E_BGEI);

1479 break;

1480 case RISCV::QC_E_BGEUI:

1481 Cond[0].setImm(RISCV::QC_E_BLTUI);

1482 break;

1483 case RISCV::QC_E_BLTUI:

1484 Cond[0].setImm(RISCV::QC_E_BGEUI);

1485 break;

1486 case RISCV::NDS_BBC:

1487 Cond[0].setImm(RISCV::NDS_BBS);

1488 break;

1489 case RISCV::NDS_BBS:

1490 Cond[0].setImm(RISCV::NDS_BBC);

1491 break;

1492 case RISCV::NDS_BEQC:

1493 Cond[0].setImm(RISCV::NDS_BNEC);

1494 break;

1495 case RISCV::NDS_BNEC:

1496 Cond[0].setImm(RISCV::NDS_BEQC);

1497 break;

1498 }

1499

1500 return false;

1501}
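// Each conditional-branch opcode above is swapped with its logical inverse
// (BEQ<->BNE, BLT<->BGE, BLTU<->BGEU, plus the Xqci/XCValu/XAndes vendor
// forms), so reversing Cond = {BLT, a0, a1} yields {BGE, a0, a1} and the
// true/false successors of the branch can be exchanged without changing
// behaviour.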

1502

1503

1504

1506 if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&

1507 MI->getOperand(1).getReg() == RISCV::X0) {

1508 Imm = MI->getOperand(2).getImm();

1509 return true;

1510 }

1511 return false;

1512}

1513

1516

1517 if (!Op.isReg())

1518 return false;

1519

1521 if (Reg == RISCV::X0) {

1522 Imm = 0;

1523 return true;

1524 }

1525 return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);

1526}

1527

1529 bool IsSigned = false;

1530 bool IsEquality = false;

1531 switch (MI.getOpcode()) {

1532 default:

1533 return false;

1534 case RISCV::BEQ:

1535 case RISCV::BNE:

1536 IsEquality = true;

1537 break;

1538 case RISCV::BGE:

1539 case RISCV::BLT:

1540 IsSigned = true;

1541 break;

1542 case RISCV::BGEU:

1543 case RISCV::BLTU:

1544 break;

1545 }

1546

1549

1553

1556

1557

1558

1559 int64_t C0, C1;

1561 unsigned NewOpc = evaluateCondBranch(CC, C0, C1) ? RISCV::BEQ : RISCV::BNE;

1562

1567 MI.eraseFromParent();

1568 return true;

1569 }

1570

1571 if (IsEquality)

1572 return false;

1573

1574

1575

1576

1577

1578

1579

1580

1581

1582

1583

1584

1585

1586

1587

1588

1589

1590

1591

1592

1593 auto searchConst = [&](int64_t C1) -> Register {

1595 auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {

1596 int64_t Imm;

1597 return isLoadImm(&I, Imm) && Imm == C1 &&

1598 I.getOperand(0).getReg().isVirtual();

1599 });

1600 if (DefC1 != E)

1601 return DefC1->getOperand(0).getReg();

1602

1604 };

1605

1606 unsigned NewOpc = RISCVCC::getBrCond(getInverseBranchCondition(CC));

1607

1608

1609

1610

1611

1612

1613 if (isFromLoadImm(MRI, LHS, C0) && C0 != 0 && LHS.getReg().isVirtual() &&

1614 MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {

1616 if (Register RegZ = searchConst(C0 + 1)) {

1618 .add(RHS)

1621

1622

1623 MRI.clearKillFlags(RegZ);

1624 MI.eraseFromParent();

1625 return true;

1626 }

1627 }

1628

1629

1630

1631

1632

1633

1634 if (isFromLoadImm(MRI, RHS, C0) && C0 != 0 && RHS.getReg().isVirtual() &&

1635 MRI.hasOneUse(RHS.getReg())) {

1637 if (Register RegZ = searchConst(C0 - 1)) {

1640 .add(LHS)

1642

1643

1644 MRI.clearKillFlags(RegZ);

1645 MI.eraseFromParent();

1646 return true;

1647 }

1648 }

1649

1650 return false;

1651}

1652

1655 assert(MI.getDesc().isBranch() && "Unexpected opcode!");

1656

1657 int NumOp = MI.getNumExplicitOperands();

1658 return MI.getOperand(NumOp - 1).getMBB();

1659}

1660

1662 int64_t BrOffset) const {

1663 unsigned XLen = STI.getXLen();

1664

1665

1666

1667 switch (BranchOp) {

1668 default:

1670 case RISCV::NDS_BBC:

1671 case RISCV::NDS_BBS:

1672 case RISCV::NDS_BEQC:

1673 case RISCV::NDS_BNEC:

1675 case RISCV::BEQ:

1676 case RISCV::BNE:

1677 case RISCV::BLT:

1678 case RISCV::BGE:

1679 case RISCV::BLTU:

1680 case RISCV::BGEU:

1681 case RISCV::BEQI:

1682 case RISCV::BNEI:

1683 case RISCV::CV_BEQIMM:

1684 case RISCV::CV_BNEIMM:

1685 case RISCV::QC_BEQI:

1686 case RISCV::QC_BNEI:

1687 case RISCV::QC_BGEI:

1688 case RISCV::QC_BLTI:

1689 case RISCV::QC_BLTUI:

1690 case RISCV::QC_BGEUI:

1691 case RISCV::QC_E_BEQI:

1692 case RISCV::QC_E_BNEI:

1693 case RISCV::QC_E_BGEI:

1694 case RISCV::QC_E_BLTI:

1695 case RISCV::QC_E_BLTUI:

1696 case RISCV::QC_E_BGEUI:

1698 case RISCV::JAL:

1699 case RISCV::PseudoBR:

1701 case RISCV::PseudoJump:

1703 }

1704}

1705

1706

1707

1708

1710

1711 switch (Opcode) {

1712 case RISCV::ADD: return RISCV::PseudoCCADD;

1713 case RISCV::SUB: return RISCV::PseudoCCSUB;

1714 case RISCV::SLL: return RISCV::PseudoCCSLL;

1715 case RISCV::SRL: return RISCV::PseudoCCSRL;

1716 case RISCV::SRA: return RISCV::PseudoCCSRA;

1717 case RISCV::AND: return RISCV::PseudoCCAND;

1718 case RISCV::OR: return RISCV::PseudoCCOR;

1719 case RISCV::XOR: return RISCV::PseudoCCXOR;

1720 case RISCV::MAX: return RISCV::PseudoCCMAX;

1721 case RISCV::MAXU: return RISCV::PseudoCCMAXU;

1722 case RISCV::MIN: return RISCV::PseudoCCMIN;

1723 case RISCV::MINU: return RISCV::PseudoCCMINU;

1724 case RISCV::MUL: return RISCV::PseudoCCMUL;

1725 case RISCV::LUI: return RISCV::PseudoCCLUI;

1726 case RISCV::QC_LI: return RISCV::PseudoCCQC_LI;

1727 case RISCV::QC_E_LI: return RISCV::PseudoCCQC_E_LI;

1728

1729 case RISCV::ADDI: return RISCV::PseudoCCADDI;

1730 case RISCV::SLLI: return RISCV::PseudoCCSLLI;

1731 case RISCV::SRLI: return RISCV::PseudoCCSRLI;

1732 case RISCV::SRAI: return RISCV::PseudoCCSRAI;

1733 case RISCV::ANDI: return RISCV::PseudoCCANDI;

1734 case RISCV::ORI: return RISCV::PseudoCCORI;

1735 case RISCV::XORI: return RISCV::PseudoCCXORI;

1736

1737 case RISCV::ADDW: return RISCV::PseudoCCADDW;

1738 case RISCV::SUBW: return RISCV::PseudoCCSUBW;

1739 case RISCV::SLLW: return RISCV::PseudoCCSLLW;

1740 case RISCV::SRLW: return RISCV::PseudoCCSRLW;

1741 case RISCV::SRAW: return RISCV::PseudoCCSRAW;

1742

1743 case RISCV::ADDIW: return RISCV::PseudoCCADDIW;

1744 case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;

1745 case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;

1746 case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;

1747

1748 case RISCV::ANDN: return RISCV::PseudoCCANDN;

1749 case RISCV::ORN: return RISCV::PseudoCCORN;

1750 case RISCV::XNOR: return RISCV::PseudoCCXNOR;

1751

1752 case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS;

1753 case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ;

1754 }

1755

1756

1757 return RISCV::INSTRUCTION_LIST_END;

1758}

1759

1760

1761

1766 if (!Reg.isVirtual())

1767 return nullptr;

1768 if (!MRI.hasOneNonDBGUse(Reg))

1769 return nullptr;

1771 if (!MI)

1772 return nullptr;

1773

1774 if (!STI.hasShortForwardBranchIMinMax() &&

1775 (MI->getOpcode() == RISCV::MAX || MI->getOpcode() == RISCV::MIN ||

1776 MI->getOpcode() == RISCV::MINU || MI->getOpcode() == RISCV::MAXU))

1777 return nullptr;

1778

1779 if (!STI.hasShortForwardBranchIMul() && MI->getOpcode() == RISCV::MUL)

1780 return nullptr;

1781

1782

1784 return nullptr;

1785

1786 if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&

1787 MI->getOperand(1).getReg() == RISCV::X0)

1788 return nullptr;

1789

1791

1792 if (MO.isFI() || MO.isCPI() || MO.isJTI())

1793 return nullptr;

1794 if (!MO.isReg())

1795 continue;

1796

1797 if (MO.isTied())

1798 return nullptr;

1799 if (MO.isDef())

1800 return nullptr;

1801

1802 if (MO.getReg().isPhysical() && MRI.isConstantPhysReg(MO.getReg()))

1803 return nullptr;

1804 }

1805 bool DontMoveAcrossStores = true;

1806 if (!MI->isSafeToMove(DontMoveAcrossStores))

1807 return nullptr;

1808 return MI;

1809}

1810

1813 unsigned &TrueOp, unsigned &FalseOp,

1814 bool &Optimizable) const {

1815 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&

1816 "Unknown select instruction");

1817

1818

1819

1820

1821

1822

1823

1824 TrueOp = 5;

1825 FalseOp = 4;

1826 Cond.push_back(MI.getOperand(1));

1827 Cond.push_back(MI.getOperand(2));

1828 Cond.push_back(MI.getOperand(3));

1829

1830 Optimizable = STI.hasShortForwardBranchIALU();

1831 return false;

1832}

1833

1837 bool PreferFalse) const {

1838 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&

1839 "Unknown select instruction");

1840 if (!STI.hasShortForwardBranchIALU())

1841 return nullptr;

1842

1846 bool Invert = !DefMI;

1850 return nullptr;

1851

1852

1854 Register DestReg = MI.getOperand(0).getReg();

1856 if (!MRI.constrainRegClass(DestReg, PreviousClass))

1857 return nullptr;

1858

1860 assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

1861

1862

1864 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);

1865

1866

1867 NewMI.add(MI.getOperand(1));

1868 NewMI.add(MI.getOperand(2));

1869

1870

1872 if (Invert)

1875

1876

1877 NewMI.add(FalseReg);

1878

1879

1881 for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)

1882 NewMI.add(DefMI->getOperand(i));

1883

1884

1885 SeenMIs.insert(NewMI);

1887

1888

1889

1890

1891

1892 if (DefMI->getParent() != MI.getParent())

1894

1895

1896 DefMI->eraseFromParent();

1897 return NewMI;

1898}

1899

1901 if (MI.isMetaInstruction())

1902 return 0;

1903

1904 unsigned Opcode = MI.getOpcode();

1905

1906 if (Opcode == TargetOpcode::INLINEASM ||

1907 Opcode == TargetOpcode::INLINEASM_BR) {

1909 return getInlineAsmLength(MI.getOperand(0).getSymbolName(),

1911 }

1912

1913 if (!MI.memoperands_empty()) {

1916 if (STI.hasStdExtZca()) {

1917 if (isCompressibleInst(MI, STI))

1918 return 4;

1919 return 6;

1920 }

1921 return 8;

1922 }

1923 }

1924

1925 if (Opcode == TargetOpcode::BUNDLE)

1926 return getInstBundleLength(MI);

1927

1928 if (MI.getParent() && MI.getParent()->getParent()) {

1929 if (isCompressibleInst(MI, STI))

1930 return 2;

1931 }

1932

1933 switch (Opcode) {

1934 case RISCV::PseudoMV_FPR16INX:

1935 case RISCV::PseudoMV_FPR32INX:

1936

1937 return STI.hasStdExtZca() ? 2 : 4;

1938 case TargetOpcode::STACKMAP:

1939

1941 case TargetOpcode::PATCHPOINT:

1942

1944 case TargetOpcode::STATEPOINT: {

1945

1947

1948 return std::max(NumBytes, 8U);

1949 }

1950 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:

1951 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:

1952 case TargetOpcode::PATCHABLE_TAIL_CALL: {

1955 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&

1956 F.hasFnAttribute("patchable-function-entry")) {

1957 unsigned Num;

1958 if (F.getFnAttribute("patchable-function-entry")

1959 .getValueAsString()

1960 .getAsInteger(10, Num))

1961 return get(Opcode).getSize();

1962

1963

1964 return (STI.hasStdExtZca() ? 2 : 4) * Num;

1965 }

1966

1967

1968 return STI.is64Bit() ? 68 : 44;

1969 }

1970 default:

1971 return get(Opcode).getSize();

1972 }

1973}

1974

1975unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {

1976 unsigned Size = 0;

1979 while (++I != E && I->isInsideBundle()) {

1980 assert(!I->isBundle() && "No nested bundle!");

1982 }

1983 return Size;

1984}

1985

1987 const unsigned Opcode = MI.getOpcode();

1988 switch (Opcode) {

1989 default:

1990 break;

1991 case RISCV::FSGNJ_D:

1992 case RISCV::FSGNJ_S:

1993 case RISCV::FSGNJ_H:

1994 case RISCV::FSGNJ_D_INX:

1995 case RISCV::FSGNJ_D_IN32X:

1996 case RISCV::FSGNJ_S_INX:

1997 case RISCV::FSGNJ_H_INX:

1998

1999 return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&

2000 MI.getOperand(1).getReg() == MI.getOperand(2).getReg();

2001 case RISCV::ADDI:

2002 case RISCV::ORI:

2003 case RISCV::XORI:

2004 return (MI.getOperand(1).isReg() &&

2005 MI.getOperand(1).getReg() == RISCV::X0) ||

2006 (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);

2007 }

2008 return MI.isAsCheapAsAMove();

2009}

2010

2011std::optional

2013 if (MI.isMoveReg())

2015 switch (MI.getOpcode()) {

2016 default:

2017 break;

2018 case RISCV::ADD:

2019 case RISCV::OR:

2020 case RISCV::XOR:

2021 if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&

2022 MI.getOperand(2).isReg())

2024 if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&

2025 MI.getOperand(1).isReg())

2027 break;

2028 case RISCV::ADDI:

2029

2030 if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&

2031 MI.getOperand(2).getImm() == 0)

2033 break;

2034 case RISCV::SUB:

2035 if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&

2036 MI.getOperand(1).isReg())

2038 break;

2039 case RISCV::SH1ADD:

2040 case RISCV::SH1ADD_UW:

2041 case RISCV::SH2ADD:

2042 case RISCV::SH2ADD_UW:

2043 case RISCV::SH3ADD:

2044 case RISCV::SH3ADD_UW:

2045 if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&

2046 MI.getOperand(2).isReg())

2048 break;

2049 case RISCV::FSGNJ_D:

2050 case RISCV::FSGNJ_S:

2051 case RISCV::FSGNJ_H:

2052 case RISCV::FSGNJ_D_INX:

2053 case RISCV::FSGNJ_D_IN32X:

2054 case RISCV::FSGNJ_S_INX:

2055 case RISCV::FSGNJ_H_INX:

2056

2057 if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&

2058 MI.getOperand(1).getReg() == MI.getOperand(2).getReg())

2060 break;

2061 }

2062 return std::nullopt;

2063}
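// isCopyInstrImpl recognizes move idioms in addition to plain COPYs:
// add/or/xor with x0 as one operand, `addi rd, rs, 0`, `sub rd, rs, x0`,
// the Zba shNadd forms with a zero index operand, and fsgnj with identical
// source operands (a canonical fmv). For example `addi a1, a0, 0` is reported
// as a copy of a0 into a1, which lets generic copy propagation see through it.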

2064

2067

2068

2069

2070 const auto &SchedModel = STI.getSchedModel();

2071 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())

2074 }

2075

2077}

2078

2082 int16_t FrmOpIdx =

2083 RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);

2084 if (FrmOpIdx < 0) {

2087 return RISCV::getNamedOperandIdx(MI->getOpcode(),

2088 RISCV::OpName::frm) < 0;

2089 }) &&

2090 "New instructions require FRM whereas the old one does not have it");

2091 return;

2092 }

2093

2096

2097 for (auto *NewMI : InsInstrs) {

2098

2099 if (static_cast<unsigned>(RISCV::getNamedOperandIdx(

2100 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())

2101 continue;

2103 MIB.add(FRM);

2106 }

2107}

2108

2110 switch (Opc) {

2111 default:

2112 return false;

2113 case RISCV::FADD_H:

2114 case RISCV::FADD_S:

2115 case RISCV::FADD_D:

2116 return true;

2117 }

2118}

2119

2121 switch (Opc) {

2122 default:

2123 return false;

2124 case RISCV::FSUB_H:

2125 case RISCV::FSUB_S:

2126 case RISCV::FSUB_D:

2127 return true;

2128 }

2129}

2130

2132 switch (Opc) {

2133 default:

2134 return false;

2135 case RISCV::FMUL_H:

2136 case RISCV::FMUL_S:

2137 case RISCV::FMUL_D:

2138 return true;

2139 }

2140}

2141

2142bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,

2143 bool Invert) const {

2144#define OPCODE_LMUL_CASE(OPC) \

2145 case RISCV::OPC##_M1: \

2146 case RISCV::OPC##_M2: \

2147 case RISCV::OPC##_M4: \

2148 case RISCV::OPC##_M8: \

2149 case RISCV::OPC##_MF2: \

2150 case RISCV::OPC##_MF4: \

2151 case RISCV::OPC##_MF8

2152

2153#define OPCODE_LMUL_MASK_CASE(OPC) \

2154 case RISCV::OPC##_M1_MASK: \

2155 case RISCV::OPC##_M2_MASK: \

2156 case RISCV::OPC##_M4_MASK: \

2157 case RISCV::OPC##_M8_MASK: \

2158 case RISCV::OPC##_MF2_MASK: \

2159 case RISCV::OPC##_MF4_MASK: \

2160 case RISCV::OPC##_MF8_MASK

2161

2162 unsigned Opcode = Inst.getOpcode();

2163 if (Invert) {

2165 Opcode = *InvOpcode;

2166 else

2167 return false;

2168 }

2169

2170

2171 switch (Opcode) {

2172 default:

2173 return false;

2178 return true;

2179 }

2180

2181

2182#undef OPCODE_LMUL_MASK_CASE

2183#undef OPCODE_LMUL_CASE

2184}

2185

2186bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,

2189 return false;

2190

2193 const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();

2194

2195

2197 const uint64_t TSFlags = Desc.TSFlags;

2198

2199 auto checkImmOperand = [&](unsigned OpIdx) {

2201 };

2202

2203 auto checkRegOperand = [&](unsigned OpIdx) {

2205 };

2206

2207

2208

2209

2210

2211 if (!checkRegOperand(1))

2212 return false;

2213

2214

2217 return false;

2218

2219

2221 const MachineBasicBlock *MBB = Root.getParent();

2225

2226 bool SeenMI2 = false;

2227 for (auto End = MBB->rend(), It = It1; It != End; ++It) {

2228 if (It == It2) {

2229 SeenMI2 = true;

2231

2232

2233 break;

2234 }

2235

2236 if (It->modifiesRegister(RISCV::V0, TRI)) {

2237 Register SrcReg = It->getOperand(1).getReg();

2238

2239

2241 return false;

2242

2243 if (!MI1VReg.isValid()) {

2244

2245 MI1VReg = SrcReg;

2246 continue;

2247 }

2248

2249

2250 if (!SeenMI2)

2251 continue;

2252

2253

2254

2255 if (MI1VReg != SrcReg)

2256 return false;

2257 else

2258 break;

2259 }

2260 }

2261

2262

2263

2264 assert(SeenMI2 && "Prev is expected to appear before Root");

2265 }

2266

2267

2270 return false;

2271

2272

2278 return false;

2279 switch (Op1.getType()) {

2282 return false;

2283 break;

2286 return false;

2287 break;

2288 default:

2290 }

2291 }

2292

2293

2296 return false;

2297

2298 return true;

2299}

2300

2301

2302

2303bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,

2304 bool &Commuted) const {

2305 const MachineBasicBlock *MBB = Inst.getParent();

2308 "Expect the present of passthrough operand.");

2311

2312

2313

2314 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&

2315 areRVVInstsReassociable(Inst, *MI2);

2316 if (Commuted)

2318

2319 return areRVVInstsReassociable(Inst, *MI1) &&

2320 (isVectorAssociativeAndCommutative(*MI1) ||

2321 isVectorAssociativeAndCommutative(*MI1, true)) &&

2324}

2325

2328 if (!isVectorAssociativeAndCommutative(Inst) &&

2329 !isVectorAssociativeAndCommutative(Inst, true))

2331

2335

2336

2337

2341 MI1 = MRI.getUniqueVRegDef(Op1.getReg());

2343 MI2 = MRI.getUniqueVRegDef(Op2.getReg());

2344

2345

2347}

2348

2354

2355 for (unsigned I = 0; I < 5; ++I)

2357 }

2358}

2359

2361 bool &Commuted) const {

2362 if (isVectorAssociativeAndCommutative(Inst) ||

2363 isVectorAssociativeAndCommutative(Inst, true))

2364 return hasReassociableVectorSibling(Inst, Commuted);

2365

2367 return false;

2368

2370 unsigned OperandIdx = Commuted ? 2 : 1;

2373

2374 int16_t InstFrmOpIdx =

2375 RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);

2376 int16_t SiblingFrmOpIdx =

2377 RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

2378

2379 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||

2381}

2382

2384 bool Invert) const {

2385 if (isVectorAssociativeAndCommutative(Inst, Invert))

2386 return true;

2387

2389 if (Invert) {

2391 if (!InverseOpcode)

2392 return false;

2393 Opc = *InverseOpcode;

2394 }

2395

2399

2400 switch (Opc) {

2401 default:

2402 return false;

2403 case RISCV::ADD:

2404 case RISCV::ADDW:

2405 case RISCV::AND:

2406 case RISCV::OR:

2407 case RISCV::XOR:

2408

2409

2410

2411

2412

2413

2414

2415

2416

2417

2418

2419

2420 case RISCV::MUL:

2421 case RISCV::MULW:

2422 case RISCV::MIN:

2423 case RISCV::MINU:

2424 case RISCV::MAX:

2425 case RISCV::MAXU:

2426 case RISCV::FMIN_H:

2427 case RISCV::FMIN_S:

2428 case RISCV::FMIN_D:

2429 case RISCV::FMAX_H:

2430 case RISCV::FMAX_S:

2431 case RISCV::FMAX_D:

2432 return true;

2433 }

2434

2435 return false;

2436}

2437

2438std::optional

2440#define RVV_OPC_LMUL_CASE(OPC, INV) \

2441 case RISCV::OPC##_M1: \

2442 return RISCV::INV##_M1; \

2443 case RISCV::OPC##_M2: \

2444 return RISCV::INV##_M2; \

2445 case RISCV::OPC##_M4: \

2446 return RISCV::INV##_M4; \

2447 case RISCV::OPC##_M8: \

2448 return RISCV::INV##_M8; \

2449 case RISCV::OPC##_MF2: \

2450 return RISCV::INV##_MF2; \

2451 case RISCV::OPC##_MF4: \

2452 return RISCV::INV##_MF4; \

2453 case RISCV::OPC##_MF8: \

2454 return RISCV::INV##_MF8

2455

2456#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \

2457 case RISCV::OPC##_M1_MASK: \

2458 return RISCV::INV##_M1_MASK; \

2459 case RISCV::OPC##_M2_MASK: \

2460 return RISCV::INV##_M2_MASK; \

2461 case RISCV::OPC##_M4_MASK: \

2462 return RISCV::INV##_M4_MASK; \

2463 case RISCV::OPC##_M8_MASK: \

2464 return RISCV::INV##_M8_MASK; \

2465 case RISCV::OPC##_MF2_MASK: \

2466 return RISCV::INV##_MF2_MASK; \

2467 case RISCV::OPC##_MF4_MASK: \

2468 return RISCV::INV##_MF4_MASK; \

2469 case RISCV::OPC##_MF8_MASK: \

2470 return RISCV::INV##_MF8_MASK

2471

2472 switch (Opcode) {

2473 default:

2474 return std::nullopt;

2475 case RISCV::FADD_H:

2476 return RISCV::FSUB_H;

2477 case RISCV::FADD_S:

2478 return RISCV::FSUB_S;

2479 case RISCV::FADD_D:

2480 return RISCV::FSUB_D;

2481 case RISCV::FSUB_H:

2482 return RISCV::FADD_H;

2483 case RISCV::FSUB_S:

2484 return RISCV::FADD_S;

2485 case RISCV::FSUB_D:

2486 return RISCV::FADD_D;

2487 case RISCV::ADD:

2488 return RISCV::SUB;

2489 case RISCV::SUB:

2490 return RISCV::ADD;

2491 case RISCV::ADDW:

2492 return RISCV::SUBW;

2493 case RISCV::SUBW:

2494 return RISCV::ADDW;

2495

2500

2501 }

2502

2503#undef RVV_OPC_LMUL_MASK_CASE

2504#undef RVV_OPC_LMUL_CASE

2505}

2506

2509 bool DoRegPressureReduce) {

2511 return false;

2514 if (!MI || !isFMUL(MI->getOpcode()))

2515 return false;

2516

2519 return false;

2520

2521

2522

2523

2524

2525 if (DoRegPressureReduce && MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

2526 return false;

2527

2528

2529 if (Root.getParent() != MI->getParent())

2530 return false;

2532}

2533

2536 bool DoRegPressureReduce) {

2540 return false;

2541 bool Added = false;

2543 DoRegPressureReduce)) {

2546 Added = true;

2547 }

2549 DoRegPressureReduce)) {

2552 Added = true;

2553 }

2554 return Added;

2555}

2556

2559 bool DoRegPressureReduce) {

2561}

2562

2563

2564

2567 unsigned CombineOpc) {

2570

2573

2574 if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)

2575 return nullptr;

2576

2577 if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

2578 return nullptr;

2579

2580 return MI;

2581}

2582

2583

2584

2585

2588 unsigned OuterShiftAmt) {

2590 if (!ShiftMI)

2591 return false;

2592

2594 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)

2595 return false;

2596

2597 return true;

2598}

2599

2600

2601

2603 switch (Opc) {

2604 default:

2605 return 0;

2606 case RISCV::SH1ADD:

2607 return 1;

2608 case RISCV::SH2ADD:

2609 return 2;

2610 case RISCV::SH3ADD:

2611 return 3;

2612 }

2613}

2614

2615

2616

2618 switch (Opc) {

2619 default:

2620 return 0;

2621 case RISCV::SH1ADD_UW:

2622 return 1;

2623 case RISCV::SH2ADD_UW:

2624 return 2;

2625 case RISCV::SH3ADD_UW:

2626 return 3;

2627 }

2628}
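// Zba semantics assumed by the two helpers above and by the machine-combiner
// patterns that use them:
//   sh1add rd, rs1, rs2   ; rd = (rs1 << 1) + rs2
//   sh2add rd, rs1, rs2   ; rd = (rs1 << 2) + rs2
//   sh3add rd, rs1, rs2   ; rd = (rs1 << 3) + rs2
// The *_UW variants zero-extend rs1 from 32 bits before shifting (RV64 only).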

2629

2630

2631

2635 if (!ShiftAmt)

2636 return false;

2637

2639

2641 if (!AddMI)

2642 return false;

2643

2644 bool Found = false;

2647 Found = true;

2648 }

2651 Found = true;

2652 }

2653

2654 return Found;

2655}

2656

2664 default:

2666 }

2667}

2668

2671 bool DoRegPressureReduce) const {

2672

2673 if (getFPPatterns(Root, Patterns, DoRegPressureReduce))

2674 return true;

2675

2677 return true;

2678

2680 DoRegPressureReduce);

2681}

2682

2684 switch (RootOpc) {

2685 default:

2687 case RISCV::FADD_H:

2688 return RISCV::FMADD_H;

2689 case RISCV::FADD_S:

2690 return RISCV::FMADD_S;

2691 case RISCV::FADD_D:

2692 return RISCV::FMADD_D;

2693 case RISCV::FSUB_H:

2695 : RISCV::FNMSUB_H;

2696 case RISCV::FSUB_S:

2698 : RISCV::FNMSUB_S;

2699 case RISCV::FSUB_D:

2701 : RISCV::FNMSUB_D;

2702 }

2703}

2704

2707 default:

2711 return 2;

2714 return 1;

2715 }

2716}

2717

2725

2730

2731 Register DstReg = Dst.getReg();

2736

2737 bool Mul1IsKill = Mul1.isKill();

2738 bool Mul2IsKill = Mul2.isKill();

2739 bool AddendIsKill = Addend.isKill();

2740

2741

2742

2743

2744 MRI.clearKillFlags(Mul1.getReg());

2745 MRI.clearKillFlags(Mul2.getReg());

2746

2748 BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)

2753

2758}

2759

2760

2761

2762

2763static void

2771

2773 assert(OuterShiftAmt != 0 && "Unexpected opcode");

2774

2778

2780 assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

2781

2782 unsigned InnerOpc;

2783 switch (InnerShiftAmt - OuterShiftAmt) {

2784 default:

2786 case 0:

2787 InnerOpc = RISCV::ADD;

2788 break;

2789 case 1:

2790 InnerOpc = RISCV::SH1ADD;

2791 break;

2792 case 2:

2793 InnerOpc = RISCV::SH2ADD;

2794 break;

2795 case 3:

2796 InnerOpc = RISCV::SH3ADD;

2797 break;

2798 }

2799

2803

2804 Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);

2805

2813

2814 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

2820}

2821

2829 default:

2831 DelInstrs, InstrIdxForVirtReg);

2832 return;

2837 return;

2838 }

2843 return;

2844 }

2846 genShXAddAddShift(Root, 1, InsInstrs, DelInstrs, InstrIdxForVirtReg);

2847 return;

2849 genShXAddAddShift(Root, 2, InsInstrs, DelInstrs, InstrIdxForVirtReg);

2850 return;

2851 }

2852}

2853

2857

2858 for (const auto &[Index, Operand] : enumerate(Desc.operands())) {

2860 unsigned OpType = Operand.OperandType;

2861 switch (OpType) {

2862 default:

2865 if (!MO.isImm()) {

2866 ErrInfo = "Expected an immediate operand.";

2867 return false;

2868 }

2869 int64_t Imm = MO.getImm();

2870 bool Ok;

2871 switch (OpType) {

2872 default:

2874

2875

2876#define CASE_OPERAND_UIMM(NUM) \

2877 case RISCVOp::OPERAND_UIMM##NUM: \

2878 Ok = isUInt(Imm); \

2879 break;

2880#define CASE_OPERAND_SIMM(NUM) \

2881 case RISCVOp::OPERAND_SIMM##NUM: \

2882 Ok = isInt(Imm); \

2883 break;

2899

2902 break;

2905 break;

2907 Ok = isUInt<5>(Imm) && (Imm != 0);

2908 break;

2910 Ok = isUInt<5>(Imm) && (Imm > 3);

2911 break;

2913 Ok = Imm >= 1 && Imm <= 32;

2914 break;

2917 break;

2920 break;

2923 break;

2926 break;

2929 break;

2931 Ok = isUInt<8>(Imm) && Imm >= 32;

2932 break;

2935 break;

2938 break;

2941 break;

2944 break;

2947 break;

2949 Ok = Imm == 3;

2950 break;

2952 Ok = Imm == 4;

2953 break;

2955 Ok = (isUInt<5>(Imm) && Imm != 0) || Imm == -1;

2956 break;

2957

2963

2965 Ok = Imm >= -15 && Imm <= 16;

2966 break;

2968 Ok = isInt<5>(Imm) && (Imm != 0);

2969 break;

2971 Ok = Imm != 0 && isInt<6>(Imm);

2972 break;

2975 break;

2978 break;

2981 break;

2983 Ok = isInt<16>(Imm) && (Imm != 0);

2984 break;

2987 break;

2990 break;

2993 Ok = Ok && Imm != 0;

2994 break;

2996 Ok = (isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);

2997 break;

2999 Ok = Imm >= 0 && Imm <= 10;

3000 break;

3002 Ok = Imm >= 0 && Imm <= 7;

3003 break;

3005 Ok = Imm >= 1 && Imm <= 10;

3006 break;

3008 Ok = Imm >= 2 && Imm <= 14;

3009 break;

3012 break;

3015 break;

3017 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;

3018 break;

3021 break;

3024 break;

3027 break;

3030 break;

3033 Imm;

3034 break;

3037 break;

3039 Ok = Imm == 0;

3040 break;

3045 else

3047 break;

3050 break;

3052 Ok = Imm == 1 || Imm == 2 || Imm == 4;

3053 break;

3054 }

3055 if (!Ok) {

3056 ErrInfo = "Invalid immediate";

3057 return false;

3058 }

3059 }

3060 break;

3062

3063

3064 if (MO.isReg()) {

3065 ErrInfo = "Expected a non-register operand.";

3066 return false;

3067 }

3069 ErrInfo = "Invalid immediate";

3070 return false;

3071 }

3072 break;

3075

3076

3077 if (MO.isReg()) {

3078 ErrInfo = "Expected a non-register operand.";

3079 return false;

3080 }

3082 ErrInfo = "Invalid immediate";

3083 return false;

3084 }

3085 break;

3087

3088

3089 if (MO.isReg()) {

3090 ErrInfo = "Expected a non-register operand.";

3091 return false;

3092 }

3094 ErrInfo = "Invalid immediate";

3095 return false;

3096 }

3097 break;

3099 if (MO.isImm()) {

3100 int64_t Imm = MO.getImm();

3101

3102 if (!isUInt<5>(Imm) && Imm != -1) {

3103 ErrInfo = "Invalid immediate";

3104 return false;

3105 }

3106 } else if (!MO.isReg()) {

3107 ErrInfo = "Expected a register or immediate operand.";

3108 return false;

3109 }

3110 break;

3111 }

3112 }

3113

3117 if (!Op.isImm() && !Op.isReg()) {

3118 ErrInfo = "Invalid operand type for VL operand";

3119 return false;

3120 }

3121 if (Op.isReg() && Op.getReg().isValid()) {

3123 auto *RC = MRI.getRegClass(Op.getReg());

3124 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {

3125 ErrInfo = "Invalid register class for VL operand";

3126 return false;

3127 }

3128 }

3130 ErrInfo = "VL operand w/o SEW operand?";

3131 return false;

3132 }

3133 }

3136 if (!MI.getOperand(OpIdx).isImm()) {

3137 ErrInfo = "SEW value expected to be an immediate";

3138 return false;

3139 }

3141 if (Log2SEW > 31) {

3142 ErrInfo = "Unexpected SEW value";

3143 return false;

3144 }

3145 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;

3147 ErrInfo = "Unexpected SEW value";

3148 return false;

3149 }

3150 }

3153 if (!MI.getOperand(OpIdx).isImm()) {

3154 ErrInfo = "Policy operand expected to be an immediate";

3155 return false;

3156 }

3159 ErrInfo = "Invalid Policy Value";

3160 return false;

3161 }

3163 ErrInfo = "policy operand w/o VL operand?";

3164 return false;

3165 }

3166

3167

3168

3169

3170 unsigned UseOpIdx;

3171 if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {

3172 ErrInfo = "policy operand w/o tied operand?";

3173 return false;

3174 }

3175 }

3176

3179 !MI.readsRegister(RISCV::FRM, nullptr)) {

3180 ErrInfo = "dynamic rounding mode should read FRM";

3181 return false;

3182 }

3183

3184 return true;

3185}

3186

3191 default:

3192 return false;

3193 case RISCV::LB:

3194 case RISCV::LBU:

3195 case RISCV::LH:

3196 case RISCV::LH_INX:

3197 case RISCV::LHU:

3198 case RISCV::LW:

3199 case RISCV::LW_INX:

3200 case RISCV::LWU:

3201 case RISCV::LD:

3202 case RISCV::LD_RV32:

3203 case RISCV::FLH:

3204 case RISCV::FLW:

3205 case RISCV::FLD:

3206 case RISCV::SB:

3207 case RISCV::SH:

3208 case RISCV::SH_INX:

3209 case RISCV::SW:

3210 case RISCV::SW_INX:

3211 case RISCV::SD:

3212 case RISCV::SD_RV32:

3213 case RISCV::FSH:

3214 case RISCV::FSW:

3215 case RISCV::FSD:

3216 break;

3217 }

3218

3220 return false;

3221

3224 return false;

3225

3228 int64_t NewOffset = OldOffset + Disp;

3229 if (STI.is64Bit())

3231

3233 return false;

3234

3240 return true;

3241}

3242

3245

3248

3250 "Addressing mode not supported for folding");

3251

3259}

3260

3261

3262

3263

3265 switch (Opc) {

3266 default:

3267 return false;

3268 case RISCV::SW:

3269 case RISCV::SD:

3270 case RISCV::LD:

3271 case RISCV::LW:

3272 return true;

3273 }

3274}

3275

3278

3280 return false;

3281

3283 return true;

3284

3286

3287

3289 return false;

3290

3292 return false;

3293

3294 return true;

3295}

3296

3302 return false;

3303

3304

3306 case RISCV::LB:

3307 case RISCV::LBU:

3308 case RISCV::SB:

3309 case RISCV::LH:

3310 case RISCV::LH_INX:

3311 case RISCV::LHU:

3312 case RISCV::FLH:

3313 case RISCV::SH:

3314 case RISCV::SH_INX:

3315 case RISCV::FSH:

3316 case RISCV::LW:

3317 case RISCV::LW_INX:

3318 case RISCV::LWU:

3319 case RISCV::FLW:

3320 case RISCV::SW:

3321 case RISCV::SW_INX:

3322 case RISCV::FSW:

3323 case RISCV::LD:

3324 case RISCV::LD_RV32:

3325 case RISCV::FLD:

3326 case RISCV::SD:

3327 case RISCV::SD_RV32:

3328 case RISCV::FSD:

3329 break;

3330 default:

3331 return false;

3332 }

3334 OffsetIsScalable = false;

3336 return false;

3338 return true;

3339}

3340

3341

3342

3347

3348

3349

3350 if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))

3351 return true;

3352

3354 return false;

3355

3358 if (MO1->getAddrSpace() != MO2->getAddrSpace())

3359 return false;

3360

3361 auto Base1 = MO1->getValue();

3362 auto Base2 = MO2->getValue();

3363 if (!Base1 || !Base2)

3364 return false;

3367

3369 return false;

3370

3371 return Base1 == Base2;

3372}

3373

3377 int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,

3378 unsigned NumBytes) const {

3379

3380

3381 if (!BaseOps1.empty() && !BaseOps2.empty()) {

3383 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();

3385 return false;

3386 } else if (!BaseOps1.empty() || !BaseOps2.empty()) {

3387

3388 return false;

3389 }

3390

3392 BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();

3393

3395

3396

3397

3398 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;

3399}

3400

3401

3402

3403

3404

3405

3406

3407

3412 return false;

3413

3414

3415

3416

3418 return false;

3421 return false;

3422

3424 return false;

3425

3429 return true;

3430}

3431

3436

3439 return false;

3440

3441

3442

3443

3444

3445

3447 const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;

3448 int64_t OffsetA = 0, OffsetB = 0;

3454 int LowOffset = std::min(OffsetA, OffsetB);

3455 int HighOffset = std::max(OffsetA, OffsetB);

3456 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;

3458 LowOffset + (int)LowWidth.getValue() <= HighOffset)

3459 return true;

3460 }

3461 }

3462 return false;

3463}

3464

3465std::pair<unsigned, unsigned>

3468 return std::make_pair(TF & Mask, TF & ~Mask);

3469}

3470

3473 using namespace RISCVII;

3474 static const std::pair<unsigned, const char *> TargetFlags[] = {

3475 {MO_CALL, "riscv-call"},

3476 {MO_LO, "riscv-lo"},

3477 {MO_HI, "riscv-hi"},

3478 {MO_PCREL_LO, "riscv-pcrel-lo"},

3479 {MO_PCREL_HI, "riscv-pcrel-hi"},

3480 {MO_GOT_HI, "riscv-got-hi"},

3481 {MO_TPREL_LO, "riscv-tprel-lo"},

3482 {MO_TPREL_HI, "riscv-tprel-hi"},

3483 {MO_TPREL_ADD, "riscv-tprel-add"},

3484 {MO_TLS_GOT_HI, "riscv-tls-got-hi"},

3485 {MO_TLS_GD_HI, "riscv-tls-gd-hi"},

3486 {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},

3487 {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},

3488 {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},

3489 {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};

3490 return ArrayRef(TargetFlags);

3491}

3493 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {

3495

3496

3497 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())

3498 return false;

3499

3500

3501

3502 if (F.hasSection())

3503 return false;

3504

3505

3506 return true;

3507}

3508

3510 unsigned &Flags) const {

3511

3513}

3514

3515

3520

3525

3529 return F.getFnAttribute("fentry-call").getValueAsBool() ||

3530 F.hasFnAttribute("patchable-function-entry");

3531}

3532

3535 return MI.readsRegister(RegNo, TRI) ||

3536 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);

3537}

3538

3541 return MI.modifiesRegister(RegNo, TRI) ||

3542 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);

3543}

3544

3546 if (MBB.back().isReturn())

3547 return true;

3549 return true;

3550

3551

3552

3553

3559 return true;

3561 break;

3562 }

3563 return false;

3564}

3565

3567

3568

3569 if (C.back().isReturn()) {

3571 "The candidate who uses return instruction must be outlined "

3572 "using tail call");

3573 return false;

3574 }

3575

3576

3577

3581 }))

3582 return true;

3583

3584 return C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);

3585}

3586

3587 std::optional<std::unique_ptr<outliner::OutlinedFunction>>

3590 std::vectoroutliner::Candidate &RepeatedSequenceLocs,

3591 unsigned MinRepeats) const {

3592

3593

3595

3596

3597 if (RepeatedSequenceLocs.size() < MinRepeats)

3598 return std::nullopt;

3599

3600

3602 unsigned InstrSizeCExt =

3604 unsigned CallOverhead = 0, FrameOverhead = 0;

3605

3606

3607 unsigned CFICount = 0;

3608 for (auto &I : Candidate) {

3609 if (I.isCFIInstruction())

3610 CFICount++;

3611 }

3612

3613

3614

3615

3616

3617

3618

3620 std::vector<MCCFIInstruction> CFIInstructions =

3621 C.getMF()->getFrameInstructions();

3622

3623 if (CFICount > 0 && CFICount != CFIInstructions.size())

3624 return std::nullopt;

3625 }

3626

3630

3631

3632 CallOverhead = 4 + InstrSizeCExt;

3633

3634 FrameOverhead = 0;

3635 } else {

3636

3637 CallOverhead = 8;

3638

3639 FrameOverhead = InstrSizeCExt;

3640 }

3641

3642

3643

3645 return std::nullopt;

3646

3647 for (auto &C : RepeatedSequenceLocs)

3648 C.setCallInfo(MOCI, CallOverhead);

3649

3650 unsigned SequenceSize = 0;

3651 for (auto &MI : Candidate)

3653

3654 return std::make_unique<outliner::OutlinedFunction>(

3655 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);

3656}
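// Worked example (illustrative note, not verbatim from this file): with the
// compressed-instruction extension available, InstrSizeCExt above is 2, so a
// tail-callable candidate is costed at CallOverhead = 4 + 2 = 6 bytes per
// call site with FrameOverhead = 0, while a non-tail candidate is costed at
// 8 bytes per call site (call t0) plus a 2-byte compressed return in the
// outlined body.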

3657

3661 unsigned Flags) const {

3665 MBB->getParent()->getSubtarget().getRegisterInfo();

3666 const auto &F = MI.getMF()->getFunction();

3667

3668

3669

3670

3671 if (MI.isCFIInstruction())

3673

3677

3678

3679 for (const auto &MO : MI.operands()) {

3680

3681

3682

3684 (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||

3685 F.hasSection() || F.getSectionPrefix()))

3687 }

3688

3689 if (isLPAD(MI))

3691

3693}

3694

3698

3700 return;

3701

3702 MBB.addLiveIn(RISCV::X5);

3703

3704

3707 .addReg(RISCV::X5)

3708 .addImm(0));

3709}

3710

3714

3717 .addGlobalAddress(M.getNamedValue(MF.getName()),

3719 return It;

3720 }

3721

3722

3723 It = MBB.insert(It,

3725 .addGlobalAddress(M.getNamedValue(MF.getName()), 0,

3727 return It;

3728}

3729

3732

3733

3735 if (!Op0.isReg() || Reg != Op0.getReg())

3736 return std::nullopt;

3737

3738

3739

3740 if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&

3741 MI.getOperand(2).isImm())

3742 return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

3743

3744 return std::nullopt;

3745}

3746

3747

3751

3752 std::string GenericComment =

3754 if (!GenericComment.empty())

3755 return GenericComment;

3756

3757

3758 if (!Op.isImm())

3759 return std::string();

3760

3762 if (OpIdx >= Desc.getNumOperands())

3763 return std::string();

3764

3765 std::string Comment;

3767

3769

3770

3771

3772 switch (OpInfo.OperandType) {

3775 unsigned Imm = Op.getImm();

3777 break;

3778 }

3780 unsigned Imm = Op.getImm();

3782 break;

3783 }

3785 unsigned Imm = Op.getImm();

3786 OS << "w" << Imm;

3787 break;

3788 }

3791 unsigned Log2SEW = Op.getImm();

3792 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;

3794 OS << "e" << SEW;

3795 break;

3796 }

3798 unsigned Policy = Op.getImm();

3800 "Invalid Policy Value");

3803 break;

3804 }

3805

3806 return Comment;

3807}

3808

3809

3810#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \

3811 RISCV::Pseudo##OP##_##LMUL

3812

3813#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \

3814 RISCV::Pseudo##OP##_##LMUL##_MASK

3815

3816#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \

3817 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \

3818 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

3819

3820#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \

3821 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \

3822 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \

3823 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \

3824 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \

3825 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \

3826 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

3827

3828#define CASE_RVV_OPCODE_UNMASK(OP) \

3829 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \

3830 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

3831

3832#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \

3833 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \

3834 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \

3835 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \

3836 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \

3837 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \

3838 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

3839

3840#define CASE_RVV_OPCODE_MASK(OP) \

3841 CASE_RVV_OPCODE_MASK_WIDEN(OP): \

3842 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

3843

3844#define CASE_RVV_OPCODE_WIDEN(OP) \

3845 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \

3846 case CASE_RVV_OPCODE_MASK_WIDEN(OP)

3847

3848#define CASE_RVV_OPCODE(OP) \

3849 CASE_RVV_OPCODE_UNMASK(OP): \

3850 case CASE_RVV_OPCODE_MASK(OP)

3851
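// Expansion example (illustrative): assuming a pseudo family named
// PseudoFOO_<LMUL> with masked variants PseudoFOO_<LMUL>_MASK, writing
//   case CASE_RVV_OPCODE(FOO):
// produces one case label per LMUL (MF8, MF4, MF2, M1, M2, M4, M8) for the
// unmasked form, followed by the same seven labels for the _MASK form,
// i.e. case RISCV::PseudoFOO_MF8: ... case RISCV::PseudoFOO_M8_MASK:.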

3852

3853

3854#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \

3855 RISCV::PseudoV##OP##_##TYPE##_##LMUL

3856

3857#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \

3858 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \

3859 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \

3860 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \

3861 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \

3862 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \

3863 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \

3864 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

3865

3866

3867#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \

3868 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

3869

3870#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \

3871 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \

3872 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \

3873 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \

3874 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

3875

3876#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \

3877 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \

3878 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

3879

3880#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \

3881 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \

3882 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

3883

3884#define CASE_VFMA_OPCODE_VV(OP) \

3885 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \

3886 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \

3887 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \

3888 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

3889

3890#define CASE_VFMA_SPLATS(OP) \

3891 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \

3892 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \

3893 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \

3894 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)

3895

3896

3898 unsigned &SrcOpIdx1,

3899 unsigned &SrcOpIdx2) const {

3901 if (!Desc.isCommutable())

3902 return false;

3903

3904 switch (MI.getOpcode()) {

3905 case RISCV::TH_MVEQZ:

3906 case RISCV::TH_MVNEZ:

3907

3908

3909

3910 if (MI.getOperand(2).getReg() == RISCV::X0)

3911 return false;

3912

3913 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);

3914 case RISCV::QC_SELECTIEQ:

3915 case RISCV::QC_SELECTINE:

3916 case RISCV::QC_SELECTIIEQ:

3917 case RISCV::QC_SELECTIINE:

3918 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);

3919 case RISCV::QC_MVEQ:

3920 case RISCV::QC_MVNE:

3921 case RISCV::QC_MVLT:

3922 case RISCV::QC_MVGE:

3923 case RISCV::QC_MVLTU:

3924 case RISCV::QC_MVGEU:

3925 case RISCV::QC_MVEQI:

3926 case RISCV::QC_MVNEI:

3927 case RISCV::QC_MVLTI:

3928 case RISCV::QC_MVGEI:

3929 case RISCV::QC_MVLTUI:

3930 case RISCV::QC_MVGEUI:

3931 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);

3932 case RISCV::TH_MULA:

3933 case RISCV::TH_MULAW:

3934 case RISCV::TH_MULAH:

3935 case RISCV::TH_MULS:

3936 case RISCV::TH_MULSW:

3937 case RISCV::TH_MULSH:

3938

3939 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);

3940 case RISCV::PseudoCCMOVGPRNoX0:

3941 case RISCV::PseudoCCMOVGPR:

3942

3943 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);

3969

3970 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);

3989

3992 1) == 0)

3993 return false;

3994

3995

3996

3997 unsigned CommutableOpIdx1 = 1;

3998 unsigned CommutableOpIdx2 = 3;

3999 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,

4000 CommutableOpIdx2))

4001 return false;

4002 return true;

4003 }

4010

4013 1) == 0)

4014 return false;

4015

4016

4017

4018

4019

4020 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)

4021 return false;

4022 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)

4023 return false;

4024

4025

4026 if (SrcOpIdx1 != CommuteAnyOperandIndex &&

4027 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)

4028 return false;

4029

4030

4031

4032

4033 if (SrcOpIdx1 == CommuteAnyOperandIndex ||

4034 SrcOpIdx2 == CommuteAnyOperandIndex) {

4035

4036

4037 unsigned CommutableOpIdx1 = SrcOpIdx1;

4038 if (SrcOpIdx1 == SrcOpIdx2) {

4039

4040

4041 CommutableOpIdx1 = 1;

4042 } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {

4043

4044 CommutableOpIdx1 = SrcOpIdx2;

4045 }

4046

4047

4048

4049 unsigned CommutableOpIdx2;

4050 if (CommutableOpIdx1 != 1) {

4051

4052 CommutableOpIdx2 = 1;

4053 } else {

4054 Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();

4055

4056

4057

4058

4059 if (Op1Reg != MI.getOperand(2).getReg())

4060 CommutableOpIdx2 = 2;

4061 else

4062 CommutableOpIdx2 = 3;

4063 }

4064

4065

4066

4067 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,

4068 CommutableOpIdx2))

4069 return false;

4070 }

4071

4072 return true;

4073 }

4074 }

4075

4077}

4078

4079

4080#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \

4081 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \

4082 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \

4083 break;

4084

4085#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \

4086 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \

4087 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \

4088 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \

4089 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \

4090 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \

4091 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \

4092 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

4093

4094

4095#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \

4096 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \

4097 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \

4098 break;

4099

4100#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \

4101 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \

4102 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \

4103 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \

4104 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

4105

4106#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \

4107 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \

4108 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

4109

4110#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \

4111 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \

4112 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

4113

4114#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \

4115 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \

4116 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \

4117 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \

4118 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

4119

4120#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \

4121 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \

4122 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \

4123 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \

4124 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)

4125

4126

4128 bool NewMI,

4129 unsigned OpIdx1,

4130 unsigned OpIdx2) const {

4132 if (NewMI)

4133 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);

4134 return MI;

4135 };

4136

4137 switch (MI.getOpcode()) {

4138 case RISCV::TH_MVEQZ:

4139 case RISCV::TH_MVNEZ: {

4140 auto &WorkingMI = cloneIfNew(MI);

4141 WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ

4142 : RISCV::TH_MVEQZ));

4144 OpIdx2);

4145 }

4146 case RISCV::QC_SELECTIEQ:

4147 case RISCV::QC_SELECTINE:

4148 case RISCV::QC_SELECTIIEQ:

4149 case RISCV::QC_SELECTIINE:

4151 case RISCV::QC_MVEQ:

4152 case RISCV::QC_MVNE:

4153 case RISCV::QC_MVLT:

4154 case RISCV::QC_MVGE:

4155 case RISCV::QC_MVLTU:

4156 case RISCV::QC_MVGEU:

4157 case RISCV::QC_MVEQI:

4158 case RISCV::QC_MVNEI:

4159 case RISCV::QC_MVLTI:

4160 case RISCV::QC_MVGEI:

4161 case RISCV::QC_MVLTUI:

4162 case RISCV::QC_MVGEUI: {

4163 auto &WorkingMI = cloneIfNew(MI);

4166 OpIdx2);

4167 }

4168 case RISCV::PseudoCCMOVGPRNoX0:

4169 case RISCV::PseudoCCMOVGPR: {

4170

4173 auto &WorkingMI = cloneIfNew(MI);

4174 WorkingMI.getOperand(3).setImm(CC);

4176 OpIdx1, OpIdx2);

4177 }

4196

4197

4198 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");

4199 assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");

4200 unsigned Opc;

4201 switch (MI.getOpcode()) {

4202 default:

4222 }

4223

4224 auto &WorkingMI = cloneIfNew(MI);

4225 WorkingMI.setDesc(get(Opc));

4227 OpIdx1, OpIdx2);

4228 }

4235 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");

4236

4237

4238 if (OpIdx1 == 3 || OpIdx2 == 3) {

4239 unsigned Opc;

4240 switch (MI.getOpcode()) {

4241 default:

4249 }

4250

4251 auto &WorkingMI = cloneIfNew(MI);

4252 WorkingMI.setDesc(get(Opc));

4254 OpIdx1, OpIdx2);

4255 }

4256

4257 break;

4258 }

4259 }

4260

4262}

4263

4264#undef CASE_VMA_CHANGE_OPCODE_COMMON

4265#undef CASE_VMA_CHANGE_OPCODE_LMULS

4266#undef CASE_VFMA_CHANGE_OPCODE_COMMON

4267#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1

4268#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2

4269#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4

4270#undef CASE_VFMA_CHANGE_OPCODE_VV

4271#undef CASE_VFMA_CHANGE_OPCODE_SPLATS

4272

4273#undef CASE_RVV_OPCODE_UNMASK_LMUL

4274#undef CASE_RVV_OPCODE_MASK_LMUL

4275#undef CASE_RVV_OPCODE_LMUL

4276#undef CASE_RVV_OPCODE_UNMASK_WIDEN

4277#undef CASE_RVV_OPCODE_UNMASK

4278#undef CASE_RVV_OPCODE_MASK_WIDEN

4279#undef CASE_RVV_OPCODE_MASK

4280#undef CASE_RVV_OPCODE_WIDEN

4281#undef CASE_RVV_OPCODE

4282

4283#undef CASE_VMA_OPCODE_COMMON

4284#undef CASE_VMA_OPCODE_LMULS

4285#undef CASE_VFMA_OPCODE_COMMON

4286#undef CASE_VFMA_OPCODE_LMULS_M1

4287#undef CASE_VFMA_OPCODE_LMULS_MF2

4288#undef CASE_VFMA_OPCODE_LMULS_MF4

4289#undef CASE_VFMA_OPCODE_VV

4290#undef CASE_VFMA_SPLATS

4291

4293 switch (MI.getOpcode()) {

4294 default:

4295 break;

4296 case RISCV::ADD:

4297 case RISCV::OR:

4298 case RISCV::XOR:

4299

4300

4301 if (MI.getOperand(1).getReg() == RISCV::X0)

4302 commuteInstruction(MI);

4303

4304 if (MI.getOperand(2).getReg() == RISCV::X0) {

4305 MI.getOperand(2).ChangeToImmediate(0);

4306 MI.setDesc(get(RISCV::ADDI));

4307 return true;

4308 }

4309

4310 if (MI.getOpcode() == RISCV::XOR &&

4311 MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {

4312 MI.getOperand(1).setReg(RISCV::X0);

4313 MI.getOperand(2).ChangeToImmediate(0);

4314 MI.setDesc(get(RISCV::ADDI));

4315 return true;

4316 }

4317 break;

4318 case RISCV::ORI:

4319 case RISCV::XORI:

4320

4321 if (MI.getOperand(1).getReg() == RISCV::X0) {

4322 MI.setDesc(get(RISCV::ADDI));

4323 return true;

4324 }

4325 break;

4326 case RISCV::SUB:

4327

4328 if (MI.getOperand(2).getReg() == RISCV::X0) {

4329 MI.getOperand(2).ChangeToImmediate(0);

4330 MI.setDesc(get(RISCV::ADDI));

4331 return true;

4332 }

4333 break;

4334 case RISCV::SUBW:

4335

4336 if (MI.getOperand(2).getReg() == RISCV::X0) {

4337 MI.getOperand(2).ChangeToImmediate(0);

4338 MI.setDesc(get(RISCV::ADDIW));

4339 return true;

4340 }

4341 break;

4342 case RISCV::ADDW:

4343

4344

4345 if (MI.getOperand(1).getReg() == RISCV::X0)

4346 commuteInstruction(MI);

4347

4348 if (MI.getOperand(2).getReg() == RISCV::X0) {

4349 MI.getOperand(2).ChangeToImmediate(0);

4350 MI.setDesc(get(RISCV::ADDIW));

4351 return true;

4352 }

4353 break;

4354 case RISCV::SH1ADD:

4355 case RISCV::SH1ADD_UW:

4356 case RISCV::SH2ADD:

4357 case RISCV::SH2ADD_UW:

4358 case RISCV::SH3ADD:

4359 case RISCV::SH3ADD_UW:

4360

4361 if (MI.getOperand(1).getReg() == RISCV::X0) {

4362 MI.removeOperand(1);

4364 MI.setDesc(get(RISCV::ADDI));

4365 return true;

4366 }

4367

4368 if (MI.getOperand(2).getReg() == RISCV::X0) {

4369 MI.removeOperand(2);

4370 unsigned Opc = MI.getOpcode();

4371 if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||

4372 Opc == RISCV::SH3ADD_UW) {

4374 MI.setDesc(get(RISCV::SLLI_UW));

4375 return true;

4376 }

4378 MI.setDesc(get(RISCV::SLLI));

4379 return true;

4380 }

4381 break;

4382 case RISCV::AND:

4383 case RISCV::MUL:

4384 case RISCV::MULH:

4385 case RISCV::MULHSU:

4386 case RISCV::MULHU:

4387 case RISCV::MULW:

4388

4389

4390

4391

4392 if (MI.getOperand(1).getReg() == RISCV::X0 ||

4393 MI.getOperand(2).getReg() == RISCV::X0) {

4394 MI.getOperand(1).setReg(RISCV::X0);

4395 MI.getOperand(2).ChangeToImmediate(0);

4396 MI.setDesc(get(RISCV::ADDI));

4397 return true;

4398 }

4399 break;

4400 case RISCV::ANDI:

4401

4402 if (MI.getOperand(1).getReg() == RISCV::X0) {

4403 MI.getOperand(2).setImm(0);

4404 MI.setDesc(get(RISCV::ADDI));

4405 return true;

4406 }

4407 break;

4408 case RISCV::SLL:

4409 case RISCV::SRL:

4410 case RISCV::SRA:

4411

4412 if (MI.getOperand(1).getReg() == RISCV::X0) {

4413 MI.getOperand(2).ChangeToImmediate(0);

4414 MI.setDesc(get(RISCV::ADDI));

4415 return true;

4416 }

4417

4418 if (MI.getOperand(2).getReg() == RISCV::X0) {

4419 MI.getOperand(2).ChangeToImmediate(0);

4420 MI.setDesc(get(RISCV::ADDI));

4421 return true;

4422 }

4423 break;

4424 case RISCV::SLLW:

4425 case RISCV::SRLW:

4426 case RISCV::SRAW:

4427

4428 if (MI.getOperand(1).getReg() == RISCV::X0) {

4429 MI.getOperand(2).ChangeToImmediate(0);

4430 MI.setDesc(get(RISCV::ADDI));

4431 return true;

4432 }

4433 break;

4434 case RISCV::SLLI:

4435 case RISCV::SRLI:

4436 case RISCV::SRAI:

4437 case RISCV::SLLIW:

4438 case RISCV::SRLIW:

4439 case RISCV::SRAIW:

4440 case RISCV::SLLI_UW:

4441

4442 if (MI.getOperand(1).getReg() == RISCV::X0) {

4443 MI.getOperand(2).setImm(0);

4444 MI.setDesc(get(RISCV::ADDI));

4445 return true;

4446 }

4447 break;

4448 case RISCV::SLTU:

4449 case RISCV::ADD_UW:

4450

4451

4452 if (MI.getOperand(1).getReg() == RISCV::X0 &&

4453 MI.getOperand(2).getReg() == RISCV::X0) {

4454 MI.getOperand(2).ChangeToImmediate(0);

4455 MI.setDesc(get(RISCV::ADDI));

4456 return true;

4457 }

4458

4459 if (MI.getOpcode() == RISCV::ADD_UW &&

4460 MI.getOperand(1).getReg() == RISCV::X0) {

4461 MI.removeOperand(1);

4463 MI.setDesc(get(RISCV::ADDI));

4464 }

4465 break;

4466 case RISCV::SLTIU:

4467

4468

4469 if (MI.getOperand(1).getReg() == RISCV::X0) {

4470 MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);

4471 MI.setDesc(get(RISCV::ADDI));

4472 return true;

4473 }

4474 break;

4475 case RISCV::SEXT_H:

4476 case RISCV::SEXT_B:

4477 case RISCV::ZEXT_H_RV32:

4478 case RISCV::ZEXT_H_RV64:

4479

4480

4481 if (MI.getOperand(1).getReg() == RISCV::X0) {

4483 MI.setDesc(get(RISCV::ADDI));

4484 return true;

4485 }

4486 break;

4487 case RISCV::MIN:

4488 case RISCV::MINU:

4489 case RISCV::MAX:

4490 case RISCV::MAXU:

4491

4492 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {

4493 MI.getOperand(2).ChangeToImmediate(0);

4494 MI.setDesc(get(RISCV::ADDI));

4495 return true;

4496 }

4497 break;

4498 case RISCV::BEQ:

4499 case RISCV::BNE:

4500

4501 if (MI.getOperand(0).getReg() == RISCV::X0) {

4503 MI.removeOperand(0);

4504 MI.insert(MI.operands_begin() + 1, {MO0});

4505 }

4506 break;

4507 case RISCV::BLTU:

4508

4509 if (MI.getOperand(0).getReg() == RISCV::X0) {

4511 MI.removeOperand(0);

4512 MI.insert(MI.operands_begin() + 1, {MO0});

4513 MI.setDesc(get(RISCV::BNE));

4514 }

4515 break;

4516 case RISCV::BGEU:

4517

4518 if (MI.getOperand(0).getReg() == RISCV::X0) {

4520 MI.removeOperand(0);

4521 MI.insert(MI.operands_begin() + 1, {MO0});

4522 MI.setDesc(get(RISCV::BEQ));

4523 }

4524 break;

4525 }

4526 return false;

4527}
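// Illustrative rewrites (not verbatim from this file) performed by the
// simplification above, using x0 as the known-zero register:
//   ADD    rd, x0, rs   -> (commuted) -> ADDI rd, rs, 0
//   XOR    rd, rs, rs   ->               ADDI rd, x0, 0
//   SH2ADD rd, rs, x0   ->               SLLI rd, rs, 2
//   BLTU   x0, rs, %bb  ->               BNE  rs, x0, %bb
// Each case rewrites the instruction in place by swapping in a cheaper
// MCInstrDesc and normalising the known-zero operand.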

4528

4529

4530#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \

4531 RISCV::PseudoV##OP##_##LMUL##_TIED

4532

4533#define CASE_WIDEOP_OPCODE_LMULS(OP) \

4534 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \

4535 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \

4536 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \

4537 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \

4538 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \

4539 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

4540

4541#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \

4542 case RISCV::PseudoV##OP##_##LMUL##_TIED: \

4543 NewOpc = RISCV::PseudoV##OP##_##LMUL; \

4544 break;

4545

4546#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \

4547 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \

4548 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \

4549 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \

4550 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \

4551 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \

4552 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

4553

4554

4555#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \

4556 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

4557

4558#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \

4559 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \

4560 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \

4561 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \

4562 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \

4563 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \

4564 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \

4565 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \

4566 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \

4567 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \

4568

4569#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \

4570 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \

4571 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \

4572 break;

4573

4574#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \

4575 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \

4576 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \

4577 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \

4578 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \

4579 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \

4580 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \

4581 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \

4582 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \

4583 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \

4584

4585#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \

4586 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \

4587 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \

4588 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \

4589 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \

4590 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)

4591

4592#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \

4593 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \

4594 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \

4595 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \

4596 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \

4597 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)

4598

4599

4604 switch (MI.getOpcode()) {

4605 default:

4606 return nullptr;

4612 MI.getNumExplicitOperands() == 7 &&

4613 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");

4614

4616 1) == 0)

4617 return nullptr;

4618

4619 unsigned NewOpc;

4620 switch (MI.getOpcode()) {

4621 default:

4627 }

4628

4629

4632 .add(MI.getOperand(0))

4634 .add(MI.getOperand(1))

4635 .add(MI.getOperand(2))

4636 .add(MI.getOperand(3))

4637 .add(MI.getOperand(4))

4638 .add(MI.getOperand(5))

4639 .add(MI.getOperand(6));

4640 break;

4641 }

4646

4648 MI.getNumExplicitOperands() == 6);

4650 1) == 0)

4651 return nullptr;

4652

4653

4654 unsigned NewOpc;

4655 switch (MI.getOpcode()) {

4656 default:

4662 }

4663

4664

4667 .add(MI.getOperand(0))

4669 .add(MI.getOperand(1))

4670 .add(MI.getOperand(2))

4671 .add(MI.getOperand(3))

4672 .add(MI.getOperand(4))

4673 .add(MI.getOperand(5));

4674 break;

4675 }

4676 }

4678

4679 if (LV) {

4680 unsigned NumOps = MI.getNumOperands();

4681 for (unsigned I = 1; I < NumOps; ++I) {

4683 if (Op.isReg() && Op.isKill())

4685 }

4686 }

4687

4688 if (LIS) {

4690

4691 if (MI.getOperand(0).isEarlyClobber()) {

4692

4693

4694

4699 }

4700 }

4701

4702 return MIB;

4703}

4704

4705#undef CASE_WIDEOP_OPCODE_COMMON

4706#undef CASE_WIDEOP_OPCODE_LMULS

4707#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON

4708#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS

4709#undef CASE_FP_WIDEOP_OPCODE_COMMON

4710#undef CASE_FP_WIDEOP_OPCODE_LMULS

4711#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON

4712#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS

4713

4721 if (ShiftAmount == 0)

4722 return;

4725 .addImm(ShiftAmount)

4727 } else if (int ShXAmount, ShiftAmount;

4728 STI.hasShlAdd(3) &&

4729 (ShXAmount = isShifted359(Amount, ShiftAmount)) != 0) {

4730

4731 unsigned Opc;

4732 switch (ShXAmount) {

4733 case 1:

4734 Opc = RISCV::SH1ADD;

4735 break;

4736 case 2:

4737 Opc = RISCV::SH2ADD;

4738 break;

4739 case 3:

4740 Opc = RISCV::SH3ADD;

4741 break;

4742 default:

4744 }

4745 if (ShiftAmount)

4748 .addImm(ShiftAmount)

4755 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);

4759 .addImm(ShiftAmount)

4766 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);

4770 .addImm(ShiftAmount)

4776 } else if (STI.hasStdExtZmmul()) {

4777 Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);

4783 } else {

4785 uint32_t PrevShiftAmount = 0;

4786 for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {

4787 if (Amount & (1U << ShiftAmount)) {

4788 if (ShiftAmount)

4791 .addImm(ShiftAmount - PrevShiftAmount)

4793 if (Amount >> (ShiftAmount + 1)) {

4794

4795 if (!Acc) {

4796 Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);

4800 } else {

4805 }

4806 }

4807 PrevShiftAmount = ShiftAmount;

4808 }

4809 }

4810 assert(Acc && "Expected valid accumulator");

4815 }

4816}
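// Minimal standalone sketch (illustrative, assumes only <cstdint> types) of
// the fallback shift-add decomposition used above when neither a single
// shift, a Zba shXadd pair, nor a multiply is available: visit the set bits
// of Amount from least to most significant, shifting the running value by
// the distance between set bits and accumulating partial products.
static uint64_t mulByConstShiftAdd(uint64_t X, uint32_t Amount) {
  uint64_t Dest = X, Acc = 0;
  uint32_t PrevShift = 0;
  for (uint32_t Shift = 0; Amount >> Shift; ++Shift) {
    if (Amount & (1U << Shift)) {
      if (Shift)
        Dest <<= (Shift - PrevShift); // SLLI by the distance to this bit
      if (Amount >> (Shift + 1))
        Acc += Dest;                  // COPY on first use, ADD afterwards
      PrevShift = Shift;
    }
  }
  return Dest + Acc;                  // final ADD of the two partial sums
}
// For Amount = 20 (0b10100) this mirrors the emitted sequence
//   slli d, d, 2; copy acc, d; slli d, d, 2; add d, d, acc
// i.e. (x << 4) + (x << 2) == 20 * x.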

4817

4820 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =

4823 return ArrayRef(TargetFlags);

4824}

4825

4828 ? STI.getTailDupAggressiveThreshold()

4829 : 2;

4830}

4831

4833

4834

4835 unsigned Opcode = MI.getOpcode();

4836 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&

4838 return false;

4839 return true;

4840}

4841

4842std::optional<std::pair<unsigned, unsigned>>

4844 switch (Opcode) {

4845 default:

4846 return std::nullopt;

4847 case RISCV::PseudoVSPILL2_M1:

4848 case RISCV::PseudoVRELOAD2_M1:

4849 return std::make_pair(2u, 1u);

4850 case RISCV::PseudoVSPILL2_M2:

4851 case RISCV::PseudoVRELOAD2_M2:

4852 return std::make_pair(2u, 2u);

4853 case RISCV::PseudoVSPILL2_M4:

4854 case RISCV::PseudoVRELOAD2_M4:

4855 return std::make_pair(2u, 4u);

4856 case RISCV::PseudoVSPILL3_M1:

4857 case RISCV::PseudoVRELOAD3_M1:

4858 return std::make_pair(3u, 1u);

4859 case RISCV::PseudoVSPILL3_M2:

4860 case RISCV::PseudoVRELOAD3_M2:

4861 return std::make_pair(3u, 2u);

4862 case RISCV::PseudoVSPILL4_M1:

4863 case RISCV::PseudoVRELOAD4_M1:

4864 return std::make_pair(4u, 1u);

4865 case RISCV::PseudoVSPILL4_M2:

4866 case RISCV::PseudoVRELOAD4_M2:

4867 return std::make_pair(4u, 2u);

4868 case RISCV::PseudoVSPILL5_M1:

4869 case RISCV::PseudoVRELOAD5_M1:

4870 return std::make_pair(5u, 1u);

4871 case RISCV::PseudoVSPILL6_M1:

4872 case RISCV::PseudoVRELOAD6_M1:

4873 return std::make_pair(6u, 1u);

4874 case RISCV::PseudoVSPILL7_M1:

4875 case RISCV::PseudoVRELOAD7_M1:

4876 return std::make_pair(7u, 1u);

4877 case RISCV::PseudoVSPILL8_M1:

4878 case RISCV::PseudoVRELOAD8_M1:

4879 return std::make_pair(8u, 1u);

4880 }

4881}
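// Illustrative reading of the pairs returned above (NF, LMUL): for example
// PseudoVSPILL3_M2 / PseudoVRELOAD3_M2 spill and reload a segment of three
// vector register groups, each of LMUL = 2, so the backing frame object
// spans 3 * 2 whole vector registers.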

4882

4884 int16_t MI1FrmOpIdx =

4885 RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);

4886 int16_t MI2FrmOpIdx =

4887 RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);

4888 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)

4889 return false;

4893}

4894

4895 std::optional<unsigned>

4897 switch (Opcode) {

4898 default:

4899 return std::nullopt;

4900

4901

4902 case RISCV::VSLL_VX:

4903 case RISCV::VSRL_VX:

4904 case RISCV::VSRA_VX:

4905

4906 case RISCV::VSSRL_VX:

4907 case RISCV::VSSRA_VX:

4908

4909 case RISCV::VROL_VX:

4910 case RISCV::VROR_VX:

4911

4912 return Log2SEW;

4913

4914

4915 case RISCV::VNSRL_WX:

4916 case RISCV::VNSRA_WX:

4917

4918 case RISCV::VNCLIPU_WX:

4919 case RISCV::VNCLIP_WX:

4920

4921 case RISCV::VWSLL_VX:

4922

4923 return Log2SEW + 1;

4924

4925

4926 case RISCV::VADD_VX:

4927 case RISCV::VSUB_VX:

4928 case RISCV::VRSUB_VX:

4929

4930 case RISCV::VWADDU_VX:

4931 case RISCV::VWSUBU_VX:

4932 case RISCV::VWADD_VX:

4933 case RISCV::VWSUB_VX:

4934 case RISCV::VWADDU_WX:

4935 case RISCV::VWSUBU_WX:

4936 case RISCV::VWADD_WX:

4937 case RISCV::VWSUB_WX:

4938

4939 case RISCV::VADC_VXM:

4940 case RISCV::VADC_VIM:

4941 case RISCV::VMADC_VXM:

4942 case RISCV::VMADC_VIM:

4943 case RISCV::VMADC_VX:

4944 case RISCV::VSBC_VXM:

4945 case RISCV::VMSBC_VXM:

4946 case RISCV::VMSBC_VX:

4947

4948 case RISCV::VAND_VX:

4949 case RISCV::VOR_VX:

4950 case RISCV::VXOR_VX:

4951

4952 case RISCV::VMSEQ_VX:

4953 case RISCV::VMSNE_VX:

4954 case RISCV::VMSLTU_VX:

4955 case RISCV::VMSLT_VX:

4956 case RISCV::VMSLEU_VX:

4957 case RISCV::VMSLE_VX:

4958 case RISCV::VMSGTU_VX:

4959 case RISCV::VMSGT_VX:

4960

4961 case RISCV::VMINU_VX:

4962 case RISCV::VMIN_VX:

4963 case RISCV::VMAXU_VX:

4964 case RISCV::VMAX_VX:

4965

4966 case RISCV::VMUL_VX:

4967 case RISCV::VMULH_VX:

4968 case RISCV::VMULHU_VX:

4969 case RISCV::VMULHSU_VX:

4970

4971 case RISCV::VDIVU_VX:

4972 case RISCV::VDIV_VX:

4973 case RISCV::VREMU_VX:

4974 case RISCV::VREM_VX:

4975

4976 case RISCV::VWMUL_VX:

4977 case RISCV::VWMULU_VX:

4978 case RISCV::VWMULSU_VX:

4979

4980 case RISCV::VMACC_VX:

4981 case RISCV::VNMSAC_VX:

4982 case RISCV::VMADD_VX:

4983 case RISCV::VNMSUB_VX:

4984

4985 case RISCV::VWMACCU_VX:

4986 case RISCV::VWMACC_VX:

4987 case RISCV::VWMACCSU_VX:

4988 case RISCV::VWMACCUS_VX:

4989

4990 case RISCV::VMERGE_VXM:

4991

4992 case RISCV::VMV_V_X:

4993

4994 case RISCV::VSADDU_VX:

4995 case RISCV::VSADD_VX:

4996 case RISCV::VSSUBU_VX:

4997 case RISCV::VSSUB_VX:

4998

4999 case RISCV::VAADDU_VX:

5000 case RISCV::VAADD_VX:

5001 case RISCV::VASUBU_VX:

5002 case RISCV::VASUB_VX:

5003

5004 case RISCV::VSMUL_VX:

5005

5006 case RISCV::VMV_S_X:

5007

5008 case RISCV::VANDN_VX:

5009 return 1U << Log2SEW;

5010 }

5011}
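// Illustrative examples (not verbatim from this file) of the values returned
// above, i.e. how many low bits of the scalar operand the instruction reads:
//   VSLL_VX  with Log2SEW = 5 -> 5 bits   (shift amount is taken mod SEW)
//   VNSRL_WX with Log2SEW = 5 -> 6 bits   (shift amount is taken mod 2*SEW)
//   VADD_VX  with Log2SEW = 5 -> 32 bits  (the full SEW-wide scalar)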

5012

5015 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);

5016 if (!RVV)

5017 return 0;

5018 return RVV->BaseInstr;

5019}

5020

5022 unsigned DestEEW =

5024

5025 if (DestEEW == 0)

5026 return 0;

5027

5028 unsigned Scaled = Log2SEW + (DestEEW - 1);

5031}

5032

5039 int64_t Imm;

5041 return Imm;

5042 return std::nullopt;

5043}

5044

5045

5047 assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&

5048 (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));

5049 if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&

5050 LHS.getReg() == RHS.getReg())

5051 return true;

5053 return true;

5054 if (LHS.isImm() && LHS.getImm() == 0)

5055 return true;

5057 return false;

5060 if (!LHSImm || !RHSImm)

5061 return false;

5062 return LHSImm <= RHSImm;

5063}

5064

5065namespace {

5070

5071public:

5074 : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

5075

5076 bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {

5077

5078

5080 return true;

5082 return true;

5083 return false;

5084 }

5085

5086 std::optional<bool> createTripCountGreaterCondition(

5087 int TC, MachineBasicBlock &MBB,

5088 SmallVectorImpl<MachineOperand> &CondParam) override {

5089

5090

5091

5092 CondParam = Cond;

5093 return {};

5094 }

5095

5096 void setPreheader(MachineBasicBlock *NewPreheader) override {}

5097

5098 void adjustTripCount(int TripCountAdjust) override {}

5099};

5100}

5101

5102 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>

5107 return nullptr;

5108

5109

5110 if (TBB == LoopBB && FBB == LoopBB)

5111 return nullptr;

5112

5113

5114 if (FBB == nullptr)

5115 return nullptr;

5116

5117 assert((TBB == LoopBB || FBB == LoopBB) &&

5118 "The Loop must be a single-basic-block loop");

5119

5120

5121 if (TBB == LoopBB)

5123

5126 if (!Op.isReg())

5127 return nullptr;

5129 if (!Reg.isVirtual())

5130 return nullptr;

5131 return MRI.getVRegDef(Reg);

5132 };

5133

5136 if (LHS && LHS->isPHI())

5137 return nullptr;

5138 if (RHS && RHS->isPHI())

5139 return nullptr;

5140

5141 return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);

5142}

5143

5144

5147 Opc = RVVMCOpcode ? RVVMCOpcode : Opc;

5148 switch (Opc) {

5149 default:

5150 return false;

5151

5152 case RISCV::DIV:

5153 case RISCV::DIVW:

5154 case RISCV::DIVU:

5155 case RISCV::DIVUW:

5156 case RISCV::REM:

5157 case RISCV::REMW:

5158 case RISCV::REMU:

5159 case RISCV::REMUW:

5160

5161 case RISCV::FDIV_H:

5162 case RISCV::FDIV_S:

5163 case RISCV::FDIV_D:

5164 case RISCV::FDIV_H_INX:

5165 case RISCV::FDIV_S_INX:

5166 case RISCV::FDIV_D_INX:

5167 case RISCV::FDIV_D_IN32X:

5168 case RISCV::FSQRT_H:

5169 case RISCV::FSQRT_S:

5170 case RISCV::FSQRT_D:

5171 case RISCV::FSQRT_H_INX:

5172 case RISCV::FSQRT_S_INX:

5173 case RISCV::FSQRT_D_INX:

5174 case RISCV::FSQRT_D_IN32X:

5175

5176 case RISCV::VDIV_VV:

5177 case RISCV::VDIV_VX:

5178 case RISCV::VDIVU_VV:

5179 case RISCV::VDIVU_VX:

5180 case RISCV::VREM_VV:

5181 case RISCV::VREM_VX:

5182 case RISCV::VREMU_VV:

5183 case RISCV::VREMU_VX:

5184

5185 case RISCV::VFDIV_VV:

5186 case RISCV::VFDIV_VF:

5187 case RISCV::VFRDIV_VF:

5188 case RISCV::VFSQRT_V:

5189 case RISCV::VFRSQRT7_V:

5190 return true;

5191 }

5192}


Definition RISCVInstrInfo.cpp:731

bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override

Definition RISCVInstrInfo.cpp:3509

bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override

Definition RISCVInstrInfo.cpp:3297

void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override

Definition RISCVInstrInfo.cpp:3695

void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override

Definition RISCVInstrInfo.cpp:2079

std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override

Definition RISCVInstrInfo.cpp:3466

MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override

Definition RISCVInstrInfo.cpp:4127

bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override

Definition RISCVInstrInfo.cpp:2326

MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override

Definition RISCVInstrInfo.cpp:1654

std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override

Definition RISCVInstrInfo.cpp:3748

bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override

Definition RISCVInstrInfo.cpp:3521

void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override

Definition RISCVInstrInfo.cpp:506

bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override

Definition RISCVInstrInfo.cpp:3897

bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override

Definition RISCVInstrInfo.cpp:1199

MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override

Definition RISCVInstrInfo.cpp:3711

bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override

Definition RISCVInstrInfo.cpp:1661

static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)

Definition RISCVInstrInfo.cpp:965

bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override

Definition RISCVInstrInfo.cpp:2383

CombinerObjective getCombinerObjective(unsigned Pattern) const override

Definition RISCVInstrInfo.cpp:2657

bool isHighLatencyDef(int Opc) const override

Definition RISCVInstrInfo.cpp:5145

static bool evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0, int64_t C1)

Return the result of the evaluation of C0 CC C1, where CC is a RISCVCC::CondCode.

Definition RISCVInstrInfo.cpp:1004
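
A sketch of the constant folding this helper performs, using condition-code names from the RISCVCC enum (the real function's structure may differ):

  switch (CC) {
  case RISCVCC::COND_EQ:  return C0 == C1;
  case RISCVCC::COND_NE:  return C0 != C1;
  case RISCVCC::COND_LT:  return C0 < C1;
  case RISCVCC::COND_GE:  return C0 >= C1;
  case RISCVCC::COND_LTU: return (uint64_t)C0 < (uint64_t)C1;
  case RISCVCC::COND_GEU: return (uint64_t)C0 >= (uint64_t)C1;
  default:
    llvm_unreachable("Unexpected condition code");
  }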

bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override

Definition RISCVInstrInfo.cpp:2669

bool optimizeCondBranch(MachineInstr &MI) const override

Definition RISCVInstrInfo.cpp:1528

std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override

Definition RISCVInstrInfo.cpp:2012

bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override

Definition RISCVInstrInfo.cpp:1811

static bool isFromLoadImm(const MachineRegisterInfo &MRI, const MachineOperand &Op, int64_t &Imm)

Return true if the operand is defined by a load-immediate instruction, and set Imm to the immediate value.

Definition RISCVInstrInfo.cpp:1514
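
A sketch under the assumption that "load immediate" here means an ADDI from the zero register; the in-tree check may accept more forms:

  if (!Op.isReg() || !Op.getReg().isVirtual())
    return false;
  const MachineInstr *Def = MRI.getVRegDef(Op.getReg());
  if (!Def || Def->getOpcode() != RISCV::ADDI ||
      !Def->getOperand(1).isReg() || Def->getOperand(1).getReg() != RISCV::X0)
    return false;
  Imm = Def->getOperand(2).getImm();
  return true;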

bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override

Definition RISCVInstrInfo.cpp:3374

bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override

Definition RISCVInstrInfo.cpp:3432

RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...

int getBranchRelaxationScratchFrameIndex() const

const RISCVRegisterInfo * getRegisterInfo() const override

Wrapper class representing virtual and physical registers.

constexpr bool isValid() const

constexpr bool isVirtual() const

Return true if the specified register number is in the virtual register namespace.

SlotIndex - An opaque wrapper around machine indexes.

SlotIndex getRegSlot(bool EC=false) const

Returns the register use/def slot in the current instruction for a normal or early-clobber def.

A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...

bool erase(PtrType Ptr)

Remove pointer from the set.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

MI-level stackmap operands.

uint32_t getNumPatchBytes() const

Return the number of patchable bytes the given stackmap should emit.

MI-level Statepoint operands.

uint32_t getNumPatchBytes() const

Return the number of patchable bytes the given statepoint should emit.

StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.

Object returned by analyzeLoopForPipelining.

TargetInstrInfo - Interface to description of machine instruction set.

virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const

Returns true iff the routine could find two commutable operands in the given machine instruction.

virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const

Return true when Inst has reassociable operands in the same MBB.

virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const

When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...

virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const

Return true when there is potentially a faster code sequence for an instruction chain ending in Root.

virtual bool isReMaterializableImpl(const MachineInstr &MI) const

For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...

virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const

Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...

virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const

The returned array encodes the operand index for each parameter because the operands may be commuted;...

virtual CombinerObjective getCombinerObjective(unsigned Pattern) const

Return the objective of a combiner pattern.

virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const

This method commutes the operands of the given machine instruction MI.

virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const

Return true when Inst has a reassociable sibling.

virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const

const MCAsmInfo * getMCAsmInfo() const

Return target specific asm information.

const uint8_t TSFlags

Configurable target specific flags.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

TargetSubtargetInfo - Generic base class for all target subtargets.

virtual const TargetInstrInfo * getInstrInfo() const

virtual const TargetRegisterInfo * getRegisterInfo() const =0

Return the target's register information.

Target - Wrapper for Target specific information.

static constexpr TypeSize getFixed(ScalarTy ExactSize)

static constexpr TypeSize getZero()

static constexpr TypeSize getScalable(ScalarTy MinimumSize)

A raw_ostream that writes to an std::string.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

@ C

The default llvm calling convention, compatible with C.

CondCode getInverseBranchCondition(CondCode)

Definition RISCVInstrInfo.cpp:1180

unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)

Definition RISCVInstrInfo.cpp:1069

static bool isValidRoundingMode(unsigned Mode)

static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)

static bool usesMaskPolicy(uint64_t TSFlags)

static bool hasRoundModeOp(uint64_t TSFlags)

static unsigned getVLOpNum(const MCInstrDesc &Desc)

static bool hasVLOp(uint64_t TSFlags)

static MCRegister getTailExpandUseRegNo(const FeatureBitset &FeatureBits)

static int getFRMOpNum(const MCInstrDesc &Desc)

static bool hasVecPolicyOp(uint64_t TSFlags)

static bool usesVXRM(uint64_t TSFlags)

static bool isRVVWideningReduction(uint64_t TSFlags)

static unsigned getSEWOpNum(const MCInstrDesc &Desc)

static bool hasSEWOp(uint64_t TSFlags)

static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)

InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)

SmallVector< Inst, 8 > InstSeq

@ OPERAND_UIMMLOG2XLEN_NONZERO

@ OPERAND_SIMM12_LSB00000

@ OPERAND_FIRST_RISCV_IMM

@ OPERAND_UIMM10_LSB00_NONZERO

@ OPERAND_SIMM10_LSB0000_NONZERO

@ OPERAND_ATOMIC_ORDERING

static unsigned getNF(uint8_t TSFlags)

static RISCVVType::VLMUL getLMul(uint8_t TSFlags)

Definition RISCVInstrInfo.cpp:68

static bool isTailAgnostic(unsigned VType)

LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS)

LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)

static bool isValidSEW(unsigned SEW)

LLVM_ABI void printVType(unsigned VType, raw_ostream &OS)

static bool isValidXSfmmVType(unsigned VTypeI)

static unsigned getSEW(unsigned VType)

static VLMUL getVLMUL(unsigned VType)

bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)

Definition RISCVInstrInfo.cpp:4883

bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)

Given two VL operands, do we know that LHS <= RHS?

Definition RISCVInstrInfo.cpp:5046
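
A conservative sketch of the reasoning involved; RISCV::VLMaxSentinel is the immediate encoding for "use VLMAX", and the answer defaults to "unknown" (false). An illustration rather than the exact in-tree logic:

  if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel)
    return true;   // Nothing exceeds VLMAX.
  if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
    return false;  // LHS is VLMAX but RHS is not.
  if (LHS.isReg() && RHS.isReg() && LHS.getReg() == RHS.getReg())
    return true;   // Identical register operands carry the same VL.
  if (LHS.isImm() && RHS.isImm())
    return LHS.getImm() <= RHS.getImm();
  return false;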

unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)

Definition RISCVInstrInfo.cpp:5013

unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)

Definition RISCVInstrInfo.cpp:5021

std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)

Definition RISCVInstrInfo.cpp:4896

std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)

Definition RISCVInstrInfo.cpp:4843

static constexpr unsigned RVVBitsPerBlock

bool isRVVSpill(const MachineInstr &MI)

Definition RISCVInstrInfo.cpp:4832

static constexpr unsigned RVVBytesPerBlock

static constexpr int64_t VLMaxSentinel

@ Implicit

Not emitted register (e.g. carry, or temporary result).

@ Define

Register definition.

@ Kill

The last use of a register.

@ Undef

Value of the register doesn't matter.

ValuesClass values(OptsTy... Options)

Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...

initializer< Ty > init(const Ty &Val)

InstrType

Represents how an instruction should be mapped by the outliner.

This is an optimization pass for GlobalISel generic memory operations.

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

MachineTraceStrategy

Strategies for selecting traces.

@ TS_MinInstrCount

Select the trace through a block that has the fewest instructions.

@ TS_Local

Select the trace that contains only the current basic block.

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

static const MachineMemOperand::Flags MONontemporalBit1

MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)

Builder interface. Specify how to create the initial instruction itself.

constexpr bool isInt(int64_t x)

Checks if an integer fits into the given bit width.
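
For example, whether a constant fits the signed 12-bit immediate of ADDI can be tested directly (a usage illustration, not code from this file):

  #include "llvm/Support/MathExtras.h"

  // ADDI takes a signed 12-bit immediate.
  bool fitsAddiImm(int64_t Imm) { return llvm::isInt<12>(Imm); }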

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...

bool isValidAtomicOrdering(Int I)

static const MachineMemOperand::Flags MONontemporalBit0

unsigned getDeadRegState(bool B)

constexpr bool has_single_bit(T Value) noexcept

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

unsigned Log2_32(uint32_t Value)

Return the floor log base 2 of the specified value, or -1 if the value is zero.

MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)

decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)

LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)

CombinerObjective

The combiner's goal may differ based on which pattern it is attempting to optimize.

constexpr bool isUInt(uint64_t x)

Checks if an unsigned integer fits into the given bit width.

CodeGenOptLevel

Code generation optimization level.

int isShifted359(T Value, int &Shift)

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

unsigned getKillRegState(bool B)

unsigned getRenamableRegState(bool B)

DWARFExpression::Operation Op

ArrayRef(const T &OneElt) -> ArrayRef< T >

constexpr bool isShiftedInt(int64_t x)

Checks if a signed integer is an N bit number shifted left by S.

void erase_if(Container &C, UnaryPredicate P)

Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...

constexpr int64_t SignExtend64(uint64_t x)

Sign-extend the number in the bottom B bits of X to a 64-bit integer.
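
The usual shift-based formulation, shown as a standalone sketch of what this helper computes:

  // Shift the B-bit field to the top, then arithmetic-shift it back down.
  uint64_t X = 0xFFF;   // low 12 bits set
  unsigned B = 12;
  int64_t Extended = (int64_t)(X << (64 - B)) >> (64 - B);  // yields -1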

LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)

This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....

constexpr bool isShiftedUInt(uint64_t x)

Checks if an unsigned integer is an N bit number shifted left by S.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

Used to describe an addressing mode similar to ExtAddrMode in CodeGenPrepare.

This represents a simple continuous liveness interval for a value.

static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.

static bool isRVVRegClass(const TargetRegisterClass *RC)

Used to describe a register and immediate addition.

An individual sequence of instructions to be replaced with a call to an outlined function.

MachineFunction * getMF() const

The information necessary to create an outlined function for some class of candidate.