LLVM: lib/Target/RISCV/RISCVInstrInfo.cpp Source File (original) (raw)
1
2
3
4
5
6
7
8
9
10
11
12
38
39using namespace llvm;
40
41#define GEN_CHECK_COMPRESS_INSTR
42#include "RISCVGenCompressInstEmitter.inc"
43
44#define GET_INSTRINFO_CTOR_DTOR
45#define GET_INSTRINFO_NAMED_OPS
46#include "RISCVGenInstrInfo.inc"
47
48#define DEBUG_TYPE "riscv-instr-info"
50 "Number of registers within vector register groups spilled");
52 "Number of registers within vector register groups reloaded");
53
56 cl::desc("Prefer whole register move for vector registers."));
57
59 "riscv-force-machine-combiner-strategy", cl::Hidden,
60 cl::desc("Force machine combiner to use a specific strategy for machine "
61 "trace metrics evaluation."),
64 "Local strategy."),
66 "MinInstrCount strategy.")));
67
69
70using namespace RISCV;
71
72#define GET_RISCVVPseudosTable_IMPL
73#include "RISCVGenSearchableTables.inc"
74
75}
76
78
79#define GET_RISCVMaskedPseudosTable_IMPL
80#include "RISCVGenSearchableTables.inc"
81
82}
83
88
89#define GET_INSTRINFO_HELPERS
90#include "RISCVGenInstrInfo.inc"
91
93 if (STI.hasStdExtZca())
99}
100
102 int &FrameIndex) const {
105}
106
108 switch (Opcode) {
109 default:
110 return std::nullopt;
111 case RISCV::VS1R_V:
112 case RISCV::VL1RE8_V:
113 case RISCV::VL1RE16_V:
114 case RISCV::VL1RE32_V:
115 case RISCV::VL1RE64_V:
116 return 1;
117 case RISCV::VS2R_V:
118 case RISCV::VL2RE8_V:
119 case RISCV::VL2RE16_V:
120 case RISCV::VL2RE32_V:
121 case RISCV::VL2RE64_V:
122 return 2;
123 case RISCV::VS4R_V:
124 case RISCV::VL4RE8_V:
125 case RISCV::VL4RE16_V:
126 case RISCV::VL4RE32_V:
127 case RISCV::VL4RE64_V:
128 return 4;
129 case RISCV::VS8R_V:
130 case RISCV::VL8RE8_V:
131 case RISCV::VL8RE16_V:
132 case RISCV::VL8RE32_V:
133 case RISCV::VL8RE64_V:
134 return 8;
135 }
136}
137
139 int &FrameIndex,
141 switch (MI.getOpcode()) {
142 default:
143 return 0;
144 case RISCV::LB:
145 case RISCV::LBU:
147 break;
148 case RISCV::LH:
149 case RISCV::LH_INX:
150 case RISCV::LHU:
151 case RISCV::FLH:
153 break;
154 case RISCV::LW:
155 case RISCV::LW_INX:
156 case RISCV::FLW:
157 case RISCV::LWU:
159 break;
160 case RISCV::LD:
161 case RISCV::LD_RV32:
162 case RISCV::FLD:
164 break;
165 case RISCV::VL1RE8_V:
166 case RISCV::VL2RE8_V:
167 case RISCV::VL4RE8_V:
168 case RISCV::VL8RE8_V:
169 if (.getOperand(1).isFI())
171 FrameIndex = MI.getOperand(1).getIndex();
174 return MI.getOperand(0).getReg();
175 }
176
177 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
178 MI.getOperand(2).getImm() == 0) {
179 FrameIndex = MI.getOperand(1).getIndex();
180 return MI.getOperand(0).getReg();
181 }
182
183 return 0;
184}
185
187 int &FrameIndex) const {
190}
191
193 int &FrameIndex,
195 switch (MI.getOpcode()) {
196 default:
197 return 0;
198 case RISCV::SB:
200 break;
201 case RISCV::SH:
202 case RISCV::SH_INX:
203 case RISCV::FSH:
205 break;
206 case RISCV::SW:
207 case RISCV::SW_INX:
208 case RISCV::FSW:
210 break;
211 case RISCV::SD:
212 case RISCV::SD_RV32:
213 case RISCV::FSD:
215 break;
216 case RISCV::VS1R_V:
217 case RISCV::VS2R_V:
218 case RISCV::VS4R_V:
219 case RISCV::VS8R_V:
220 if (.getOperand(1).isFI())
222 FrameIndex = MI.getOperand(1).getIndex();
225 return MI.getOperand(0).getReg();
226 }
227
228 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
229 MI.getOperand(2).getImm() == 0) {
230 FrameIndex = MI.getOperand(1).getIndex();
231 return MI.getOperand(0).getReg();
232 }
233
234 return 0;
235}
236
240 case RISCV::VMV_V_X:
241 case RISCV::VFMV_V_F:
242 case RISCV::VMV_V_I:
243 case RISCV::VMV_S_X:
244 case RISCV::VFMV_S_F:
245 case RISCV::VID_V:
246 return MI.getOperand(1).isUndef();
247 default:
249 }
250}
251
253 unsigned NumRegs) {
254 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
255}
256
263 return false;
264
265 assert(MBBI->getOpcode() == TargetOpcode::COPY &&
266 "Unexpected COPY instruction.");
267 Register SrcReg = MBBI->getOperand(1).getReg();
269
270 bool FoundDef = false;
271 bool FirstVSetVLI = false;
272 unsigned FirstSEW = 0;
273 while (MBBI != MBB.begin()) {
275 if (MBBI->isMetaInstruction())
276 continue;
277
278 if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {
279
280
281
282
283
284
285 if (!FoundDef) {
286 if (!FirstVSetVLI) {
287 FirstVSetVLI = true;
288 unsigned FirstVType = MBBI->getOperand(2).getImm();
291
292
293 if (FirstLMul != LMul)
294 return false;
295 }
296
297
298 if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))
299 return false;
300 continue;
301 }
302
303
304 unsigned VType = MBBI->getOperand(2).getImm();
305
306 if (FirstVSetVLI) {
307
309 return false;
310 }
311
312
314 return false;
315
316
317
318
319
320
322 } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
323 return false;
324 } else if (MBBI->getNumDefs()) {
325
326
327 if (MBBI->modifiesRegister(RISCV::VL, nullptr))
328 return false;
329
330
331
333 if (!MO.isReg() || !MO.isDef())
334 continue;
335 if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350 if (MO.getReg() != SrcReg)
351 return false;
352
353
354
355
356
357
358
359
360
361
364 return false;
365
366
367
369 return false;
370
371
372 FoundDef = true;
373 DefMBBI = MBBI;
374 break;
375 }
376 }
377 }
378 }
379
380 return false;
381}
382
390
391 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
392 uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
394 assert(!Fractional && "It is impossible be fractional lmul here.");
395 unsigned NumRegs = NF * LMulVal;
396 bool ReversedCopy =
398 if (ReversedCopy) {
399
400
401 SrcEncoding += NumRegs - 1;
402 DstEncoding += NumRegs - 1;
403 }
404
405 unsigned I = 0;
406 auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
408 unsigned, unsigned> {
409 if (ReversedCopy) {
410
411
412
413
414
415
416 uint16_t Diff = DstEncoding - SrcEncoding;
417 if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
418 DstEncoding % 8 == 7)
420 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
421 if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
422 DstEncoding % 4 == 3)
424 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
425 if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
426 DstEncoding % 2 == 1)
428 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
429
431 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
432 }
433
434
435
436 if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
438 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
439 if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
441 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
442 if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
444 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
445
447 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
448 };
449
450 while (I != NumRegs) {
451
452
453
454
455 auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
456 GetCopyInfo(SrcEncoding, DstEncoding);
458
460 if (LMul == LMulCopied &&
462 Opc = VVOpc;
463 if (DefMBBI->getOpcode() == VIOpc)
464 Opc = VIOpc;
465 }
466
467
468
469 MCRegister ActualSrcReg = TRI->findVRegWithEncoding(
470 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
471 MCRegister ActualDstReg = TRI->findVRegWithEncoding(
472 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
473
477 if (UseVMV)
479 if (UseVMV_V_I)
480 MIB = MIB.add(DefMBBI->getOperand(2));
481 else
482 MIB = MIB.addReg(ActualSrcReg, getKillRegState(KillSrc));
483 if (UseVMV) {
486 unsigned Log2SEW =
488 MIB.addImm(Log2SEW ? Log2SEW : 3);
489 MIB.addImm(0);
492 }
493
494
495
496
498
499
500 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
501 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
502 I += NumCopied;
503 }
504}
505
509 Register SrcReg, bool KillSrc,
510 bool RenamableDest, bool RenamableSrc) const {
513
514 if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
518 return;
519 }
520
521 if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
524 return;
525 }
526
527 if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
530 return;
531 }
532
533 if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
534 if (STI.isRV32() && STI.hasStdExtZdinx()) {
535
536
540 return;
541 }
542
543 MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
544 MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
545
546 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
547 OddReg = RISCV::X0;
548 assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");
549
550
552 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
553 .addReg(EvenReg, KillFlag)
556 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
557 .addReg(OddReg, KillFlag)
559 return;
560 }
561
562
563 if (RISCV::VCSRRegClass.contains(SrcReg) &&
564 RISCV::GPRRegClass.contains(DstReg)) {
566 .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
568 return;
569 }
570
571 if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
572 unsigned Opc;
573 if (STI.hasStdExtZfh()) {
574 Opc = RISCV::FSGNJ_H;
575 } else {
577 (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
578 "Unexpected extensions");
579
580 DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
581 &RISCV::FPR32RegClass);
582 SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
583 &RISCV::FPR32RegClass);
584 Opc = RISCV::FSGNJ_S;
585 }
587 .addReg(SrcReg, KillFlag)
588 .addReg(SrcReg, KillFlag);
589 return;
590 }
591
592 if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
594 .addReg(SrcReg, KillFlag)
595 .addReg(SrcReg, KillFlag);
596 return;
597 }
598
599 if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
601 .addReg(SrcReg, KillFlag)
602 .addReg(SrcReg, KillFlag);
603 return;
604 }
605
606 if (RISCV::FPR32RegClass.contains(DstReg) &&
607 RISCV::GPRRegClass.contains(SrcReg)) {
609 .addReg(SrcReg, KillFlag);
610 return;
611 }
612
613 if (RISCV::GPRRegClass.contains(DstReg) &&
614 RISCV::FPR32RegClass.contains(SrcReg)) {
616 .addReg(SrcReg, KillFlag);
617 return;
618 }
619
620 if (RISCV::FPR64RegClass.contains(DstReg) &&
621 RISCV::GPRRegClass.contains(SrcReg)) {
622 assert(STI.getXLen() == 64 && "Unexpected GPR size");
624 .addReg(SrcReg, KillFlag);
625 return;
626 }
627
628 if (RISCV::GPRRegClass.contains(DstReg) &&
629 RISCV::FPR64RegClass.contains(SrcReg)) {
630 assert(STI.getXLen() == 64 && "Unexpected GPR size");
632 .addReg(SrcReg, KillFlag);
633 return;
634 }
635
636
638 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
641 return;
642 }
643
645}
646
649 Register SrcReg, bool IsKill, int FI,
655
656 unsigned Opcode;
657 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
658 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
659 : RISCV::SD;
660 } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
661 Opcode = RISCV::SH_INX;
662 } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
663 Opcode = RISCV::SW_INX;
664 } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
665 Opcode = RISCV::PseudoRV32ZdinxSD;
666 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
667 Opcode = RISCV::FSH;
668 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
669 Opcode = RISCV::FSW;
670 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
671 Opcode = RISCV::FSD;
672 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
673 Opcode = RISCV::VS1R_V;
674 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
675 Opcode = RISCV::VS2R_V;
676 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
677 Opcode = RISCV::VS4R_V;
678 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
679 Opcode = RISCV::VS8R_V;
680 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
681 Opcode = RISCV::PseudoVSPILL2_M1;
682 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
683 Opcode = RISCV::PseudoVSPILL2_M2;
684 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
685 Opcode = RISCV::PseudoVSPILL2_M4;
686 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
687 Opcode = RISCV::PseudoVSPILL3_M1;
688 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
689 Opcode = RISCV::PseudoVSPILL3_M2;
690 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
691 Opcode = RISCV::PseudoVSPILL4_M1;
692 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
693 Opcode = RISCV::PseudoVSPILL4_M2;
694 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
695 Opcode = RISCV::PseudoVSPILL5_M1;
696 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
697 Opcode = RISCV::PseudoVSPILL6_M1;
698 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
699 Opcode = RISCV::PseudoVSPILL7_M1;
700 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
701 Opcode = RISCV::PseudoVSPILL8_M1;
702 else
704
709
717 } else {
721
728 }
729}
730
741
742 unsigned Opcode;
743 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
744 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
745 : RISCV::LD;
746 } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
747 Opcode = RISCV::LH_INX;
748 } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
749 Opcode = RISCV::LW_INX;
750 } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
751 Opcode = RISCV::PseudoRV32ZdinxLD;
752 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
753 Opcode = RISCV::FLH;
754 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
755 Opcode = RISCV::FLW;
756 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
757 Opcode = RISCV::FLD;
758 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
759 Opcode = RISCV::VL1RE8_V;
760 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
761 Opcode = RISCV::VL2RE8_V;
762 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
763 Opcode = RISCV::VL4RE8_V;
764 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
765 Opcode = RISCV::VL8RE8_V;
766 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
767 Opcode = RISCV::PseudoVRELOAD2_M1;
768 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
769 Opcode = RISCV::PseudoVRELOAD2_M2;
770 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
771 Opcode = RISCV::PseudoVRELOAD2_M4;
772 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
773 Opcode = RISCV::PseudoVRELOAD3_M1;
774 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
775 Opcode = RISCV::PseudoVRELOAD3_M2;
776 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
777 Opcode = RISCV::PseudoVRELOAD4_M1;
778 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
779 Opcode = RISCV::PseudoVRELOAD4_M2;
780 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
781 Opcode = RISCV::PseudoVRELOAD5_M1;
782 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
783 Opcode = RISCV::PseudoVRELOAD6_M1;
784 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
785 Opcode = RISCV::PseudoVRELOAD7_M1;
786 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
787 Opcode = RISCV::PseudoVRELOAD8_M1;
788 else
789 llvm_unreachable("Can't load this register from stack slot");
790
795
802 } else {
806
812 }
813}
817
818
819
820
822 return std::nullopt;
823
824
825 if (Ops.size() != 1 || Ops[0] != 1)
826 return std::nullopt;
827
828 switch (MI.getOpcode()) {
829 default:
830 if (RISCVInstrInfo::isSEXT_W(MI))
831 return RISCV::LW;
832 if (RISCVInstrInfo::isZEXT_W(MI))
833 return RISCV::LWU;
834 if (RISCVInstrInfo::isZEXT_B(MI))
835 return RISCV::LBU;
836 break;
837 case RISCV::SEXT_H:
838 return RISCV::LH;
839 case RISCV::SEXT_B:
840 return RISCV::LB;
841 case RISCV::ZEXT_H_RV32:
842 case RISCV::ZEXT_H_RV64:
843 return RISCV::LHU;
844 }
845
847 default:
848 return std::nullopt;
849 case RISCV::VMV_X_S: {
850 unsigned Log2SEW =
852 if (ST.getXLen() < (1U << Log2SEW))
853 return std::nullopt;
854 switch (Log2SEW) {
855 case 3:
856 return RISCV::LB;
857 case 4:
858 return RISCV::LH;
859 case 5:
860 return RISCV::LW;
861 case 6:
862 return RISCV::LD;
863 default:
865 }
866 }
867 case RISCV::VFMV_F_S: {
868 unsigned Log2SEW =
870 switch (Log2SEW) {
871 case 4:
872 return RISCV::FLH;
873 case 5:
874 return RISCV::FLW;
875 case 6:
876 return RISCV::FLD;
877 default:
879 }
880 }
881 }
882}
883
884
889
891 if (!LoadOpc)
892 return nullptr;
893 Register DstReg = MI.getOperand(0).getReg();
894 return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
895 DstReg)
898}
899
904 bool DstIsDead) const {
906
907
909
911 report_fatal_error("Should only materialize 32-bit constants for RV32");
912
913
915 }
916
919
920 bool SrcRenamable = false;
921 unsigned Num = 0;
922
924 bool LastItem = ++Num == Seq.size();
925 unsigned DstRegState = getDeadRegState(DstIsDead && LastItem) |
927 unsigned SrcRegState = getKillRegState(SrcReg != RISCV::X0) |
929 switch (Inst.getOpndKind()) {
933 .addImm(Inst.getImm())
935 break;
939 .addReg(SrcReg, SrcRegState)
942 break;
946 .addReg(SrcReg, SrcRegState)
947 .addReg(SrcReg, SrcRegState)
949 break;
953 .addReg(SrcReg, SrcRegState)
954 .addImm(Inst.getImm())
956 break;
957 }
958
959
960 SrcReg = DstReg;
961 SrcRenamable = DstRenamable;
962 }
963}
964
966 switch (Opc) {
967 default:
969 case RISCV::BEQ:
970 case RISCV::BEQI:
971 case RISCV::CV_BEQIMM:
972 case RISCV::QC_BEQI:
973 case RISCV::QC_E_BEQI:
974 case RISCV::NDS_BBC:
975 case RISCV::NDS_BEQC:
977 case RISCV::BNE:
978 case RISCV::BNEI:
979 case RISCV::QC_BNEI:
980 case RISCV::QC_E_BNEI:
981 case RISCV::CV_BNEIMM:
982 case RISCV::NDS_BBS:
983 case RISCV::NDS_BNEC:
985 case RISCV::BLT:
986 case RISCV::QC_BLTI:
987 case RISCV::QC_E_BLTI:
989 case RISCV::BGE:
990 case RISCV::QC_BGEI:
991 case RISCV::QC_E_BGEI:
993 case RISCV::BLTU:
994 case RISCV::QC_BLTUI:
995 case RISCV::QC_E_BLTUI:
997 case RISCV::BGEU:
998 case RISCV::QC_BGEUI:
999 case RISCV::QC_E_BGEUI:
1001 }
1002}
1003
1005 int64_t C1) {
1006 switch (CC) {
1007 default:
1010 return C0 == C1;
1012 return C0 != C1;
1014 return C0 < C1;
1016 return C0 >= C1;
1021 }
1022}
1023
1024
1025
1026
1029
1031 "Unknown conditional branch");
1036}
1037
1039 switch (Opcode) {
1040 default:
1042 case RISCV::QC_MVEQ:
1043 return RISCV::QC_MVNE;
1044 case RISCV::QC_MVNE:
1045 return RISCV::QC_MVEQ;
1046 case RISCV::QC_MVLT:
1047 return RISCV::QC_MVGE;
1048 case RISCV::QC_MVGE:
1049 return RISCV::QC_MVLT;
1050 case RISCV::QC_MVLTU:
1051 return RISCV::QC_MVGEU;
1052 case RISCV::QC_MVGEU:
1053 return RISCV::QC_MVLTU;
1054 case RISCV::QC_MVEQI:
1055 return RISCV::QC_MVNEI;
1056 case RISCV::QC_MVNEI:
1057 return RISCV::QC_MVEQI;
1058 case RISCV::QC_MVLTI:
1059 return RISCV::QC_MVGEI;
1060 case RISCV::QC_MVGEI:
1061 return RISCV::QC_MVLTI;
1062 case RISCV::QC_MVLTUI:
1063 return RISCV::QC_MVGEUI;
1064 case RISCV::QC_MVGEUI:
1065 return RISCV::QC_MVLTUI;
1066 }
1067}
1068
1070 switch (SelectOpc) {
1071 default:
1072 switch (CC) {
1073 default:
1076 return RISCV::BEQ;
1078 return RISCV::BNE;
1080 return RISCV::BLT;
1082 return RISCV::BGE;
1084 return RISCV::BLTU;
1086 return RISCV::BGEU;
1087 }
1088 break;
1089 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
1090 switch (CC) {
1091 default:
1094 return RISCV::BEQI;
1096 return RISCV::BNEI;
1097 }
1098 break;
1099 case RISCV::Select_GPR_Using_CC_SImm5_CV:
1100 switch (CC) {
1101 default:
1104 return RISCV::CV_BEQIMM;
1106 return RISCV::CV_BNEIMM;
1107 }
1108 break;
1109 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
1110 switch (CC) {
1111 default:
1114 return RISCV::QC_BEQI;
1116 return RISCV::QC_BNEI;
1118 return RISCV::QC_BLTI;
1120 return RISCV::QC_BGEI;
1121 }
1122 break;
1123 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
1124 switch (CC) {
1125 default:
1128 return RISCV::QC_BLTUI;
1130 return RISCV::QC_BGEUI;
1131 }
1132 break;
1133 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
1134 switch (CC) {
1135 default:
1138 return RISCV::QC_E_BEQI;
1140 return RISCV::QC_E_BNEI;
1142 return RISCV::QC_E_BLTI;
1144 return RISCV::QC_E_BGEI;
1145 }
1146 break;
1147 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
1148 switch (CC) {
1149 default:
1152 return RISCV::QC_E_BLTUI;
1154 return RISCV::QC_E_BGEUI;
1155 }
1156 break;
1157 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
1158 switch (CC) {
1159 default:
1162 return RISCV::NDS_BBC;
1164 return RISCV::NDS_BBS;
1165 }
1166 break;
1167 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
1168 switch (CC) {
1169 default:
1172 return RISCV::NDS_BEQC;
1174 return RISCV::NDS_BNEC;
1175 }
1176 break;
1177 }
1178}
1179
1181 switch (CC) {
1182 default:
1196 }
1197}
1198
1203 bool AllowModify) const {
1204 TBB = FBB = nullptr;
1205 Cond.clear();
1206
1207
1209 if (I == MBB.end() || !isUnpredicatedTerminator(*I))
1210 return false;
1211
1212
1213
1215 int NumTerminators = 0;
1216 for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
1217 J++) {
1218 NumTerminators++;
1219 if (J->getDesc().isUnconditionalBranch() ||
1220 J->getDesc().isIndirectBranch()) {
1221 FirstUncondOrIndirectBr = J.getReverse();
1222 }
1223 }
1224
1225
1226
1227 if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
1228 while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
1229 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
1230 NumTerminators--;
1231 }
1232 I = FirstUncondOrIndirectBr;
1233 }
1234
1235
1236 if (I->getDesc().isIndirectBranch())
1237 return true;
1238
1239
1240 if (I->isPreISelOpcode())
1241 return true;
1242
1243
1244 if (NumTerminators > 2)
1245 return true;
1246
1247
1248 if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
1250 return false;
1251 }
1252
1253
1254 if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
1256 return false;
1257 }
1258
1259
1260 if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
1261 I->getDesc().isUnconditionalBranch()) {
1264 return false;
1265 }
1266
1267
1268 return true;
1269}
1270
1272 int *BytesRemoved) const {
1273 if (BytesRemoved)
1274 *BytesRemoved = 0;
1277 return 0;
1278
1279 if (->getDesc().isUnconditionalBranch() &&
1280 ->getDesc().isConditionalBranch())
1281 return 0;
1282
1283
1284 if (BytesRemoved)
1286 I->eraseFromParent();
1287
1289
1291 return 1;
1292 --I;
1293 if (->getDesc().isConditionalBranch())
1294 return 1;
1295
1296
1297 if (BytesRemoved)
1299 I->eraseFromParent();
1300 return 2;
1301}
1302
1303
1304
1308 if (BytesAdded)
1309 *BytesAdded = 0;
1310
1311
1312 assert(TBB && "insertBranch must not be told to insert a fallthrough");
1314 "RISC-V branch conditions have two components!");
1315
1316
1317 if (Cond.empty()) {
1319 if (BytesAdded)
1321 return 1;
1322 }
1323
1324
1329 if (BytesAdded)
1331
1332
1333 if (!FBB)
1334 return 1;
1335
1336
1338 if (BytesAdded)
1340 return 2;
1341}
1342
1346 const DebugLoc &DL, int64_t BrOffset,
1348 assert(RS && "RegScavenger required for long branching");
1350 "new block should be inserted for expanding unconditional branch");
1353 "restore block should be inserted for restoring clobbered registers");
1354
1359
1362 "Branch offsets outside of the signed 32-bit range not supported");
1363
1364
1365
1366
1367 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
1369
1373
1374 RS->enterBasicBlockEnd(MBB);
1376 if (STI.hasStdExtZicfilp())
1377 RC = &RISCV::GPRX7RegClass;
1379 RS->scavengeRegisterBackwards(*RC, MI.getIterator(),
1380 false, 0,
1381 false);
1383 RS->setRegUsed(TmpGPR);
1384 else {
1385
1386
1387
1388 TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1389
1390 if (STI.hasStdExtZicfilp())
1391 TmpGPR = RISCV::X7;
1392
1394 if (FrameIndex == -1)
1396
1398 &RISCV::GPRRegClass, Register());
1399 TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
1400 0, 1);
1401
1402 MI.getOperand(1).setMBB(&RestoreBB);
1403
1405 &RISCV::GPRRegClass, Register());
1406 TRI->eliminateFrameIndex(RestoreBB.back(),
1407 0, 1);
1408 }
1409
1410 MRI.replaceRegWith(ScratchReg, TmpGPR);
1411 MRI.clearVirtRegs();
1412}
1413
1416 assert((Cond.size() == 3) && "Invalid branch condition!");
1418 default:
1420 case RISCV::BEQ:
1421 Cond[0].setImm(RISCV::BNE);
1422 break;
1423 case RISCV::BEQI:
1424 Cond[0].setImm(RISCV::BNEI);
1425 break;
1426 case RISCV::BNE:
1427 Cond[0].setImm(RISCV::BEQ);
1428 break;
1429 case RISCV::BNEI:
1430 Cond[0].setImm(RISCV::BEQI);
1431 break;
1432 case RISCV::BLT:
1433 Cond[0].setImm(RISCV::BGE);
1434 break;
1435 case RISCV::BGE:
1436 Cond[0].setImm(RISCV::BLT);
1437 break;
1438 case RISCV::BLTU:
1439 Cond[0].setImm(RISCV::BGEU);
1440 break;
1441 case RISCV::BGEU:
1442 Cond[0].setImm(RISCV::BLTU);
1443 break;
1444 case RISCV::CV_BEQIMM:
1445 Cond[0].setImm(RISCV::CV_BNEIMM);
1446 break;
1447 case RISCV::CV_BNEIMM:
1448 Cond[0].setImm(RISCV::CV_BEQIMM);
1449 break;
1450 case RISCV::QC_BEQI:
1451 Cond[0].setImm(RISCV::QC_BNEI);
1452 break;
1453 case RISCV::QC_BNEI:
1454 Cond[0].setImm(RISCV::QC_BEQI);
1455 break;
1456 case RISCV::QC_BGEI:
1457 Cond[0].setImm(RISCV::QC_BLTI);
1458 break;
1459 case RISCV::QC_BLTI:
1460 Cond[0].setImm(RISCV::QC_BGEI);
1461 break;
1462 case RISCV::QC_BGEUI:
1463 Cond[0].setImm(RISCV::QC_BLTUI);
1464 break;
1465 case RISCV::QC_BLTUI:
1466 Cond[0].setImm(RISCV::QC_BGEUI);
1467 break;
1468 case RISCV::QC_E_BEQI:
1469 Cond[0].setImm(RISCV::QC_E_BNEI);
1470 break;
1471 case RISCV::QC_E_BNEI:
1472 Cond[0].setImm(RISCV::QC_E_BEQI);
1473 break;
1474 case RISCV::QC_E_BGEI:
1475 Cond[0].setImm(RISCV::QC_E_BLTI);
1476 break;
1477 case RISCV::QC_E_BLTI:
1478 Cond[0].setImm(RISCV::QC_E_BGEI);
1479 break;
1480 case RISCV::QC_E_BGEUI:
1481 Cond[0].setImm(RISCV::QC_E_BLTUI);
1482 break;
1483 case RISCV::QC_E_BLTUI:
1484 Cond[0].setImm(RISCV::QC_E_BGEUI);
1485 break;
1486 case RISCV::NDS_BBC:
1487 Cond[0].setImm(RISCV::NDS_BBS);
1488 break;
1489 case RISCV::NDS_BBS:
1490 Cond[0].setImm(RISCV::NDS_BBC);
1491 break;
1492 case RISCV::NDS_BEQC:
1493 Cond[0].setImm(RISCV::NDS_BNEC);
1494 break;
1495 case RISCV::NDS_BNEC:
1496 Cond[0].setImm(RISCV::NDS_BEQC);
1497 break;
1498 }
1499
1500 return false;
1501}
1502
1503
1504
1506 if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
1507 MI->getOperand(1).getReg() == RISCV::X0) {
1508 Imm = MI->getOperand(2).getImm();
1509 return true;
1510 }
1511 return false;
1512}
1513
1516
1517 if (.isReg())
1518 return false;
1519
1521 if (Reg == RISCV::X0) {
1522 Imm = 0;
1523 return true;
1524 }
1525 return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
1526}
1527
1529 bool IsSigned = false;
1530 bool IsEquality = false;
1531 switch (MI.getOpcode()) {
1532 default:
1533 return false;
1534 case RISCV::BEQ:
1535 case RISCV::BNE:
1536 IsEquality = true;
1537 break;
1538 case RISCV::BGE:
1539 case RISCV::BLT:
1540 IsSigned = true;
1541 break;
1542 case RISCV::BGEU:
1543 case RISCV::BLTU:
1544 break;
1545 }
1546
1549
1553
1556
1557
1558
1559 int64_t C0, C1;
1561 unsigned NewOpc = evaluateCondBranch(CC, C0, C1) ? RISCV::BEQ : RISCV::BNE;
1562
1567 MI.eraseFromParent();
1568 return true;
1569 }
1570
1571 if (IsEquality)
1572 return false;
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593 auto searchConst = [&](int64_t C1) -> Register {
1595 auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
1596 int64_t Imm;
1597 return isLoadImm(&I, Imm) && Imm == C1 &&
1598 I.getOperand(0).getReg().isVirtual();
1599 });
1600 if (DefC1 != E)
1601 return DefC1->getOperand(0).getReg();
1602
1604 };
1605
1606 unsigned NewOpc = RISCVCC::getBrCond(getInverseBranchCondition(CC));
1607
1608
1609
1610
1611
1612
1613 if (isFromLoadImm(MRI, LHS, C0) && C0 != 0 && LHS.getReg().isVirtual() &&
1614 MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1616 if (Register RegZ = searchConst(C0 + 1)) {
1618 .add(RHS)
1621
1622
1623 MRI.clearKillFlags(RegZ);
1624 MI.eraseFromParent();
1625 return true;
1626 }
1627 }
1628
1629
1630
1631
1632
1633
1634 if (isFromLoadImm(MRI, RHS, C0) && C0 != 0 && RHS.getReg().isVirtual() &&
1635 MRI.hasOneUse(RHS.getReg())) {
1637 if (Register RegZ = searchConst(C0 - 1)) {
1640 .add(LHS)
1642
1643
1644 MRI.clearKillFlags(RegZ);
1645 MI.eraseFromParent();
1646 return true;
1647 }
1648 }
1649
1650 return false;
1651}
1652
1655 assert(MI.getDesc().isBranch() && "Unexpected opcode!");
1656
1657 int NumOp = MI.getNumExplicitOperands();
1658 return MI.getOperand(NumOp - 1).getMBB();
1659}
1660
1662 int64_t BrOffset) const {
1663 unsigned XLen = STI.getXLen();
1664
1665
1666
1667 switch (BranchOp) {
1668 default:
1670 case RISCV::NDS_BBC:
1671 case RISCV::NDS_BBS:
1672 case RISCV::NDS_BEQC:
1673 case RISCV::NDS_BNEC:
1675 case RISCV::BEQ:
1676 case RISCV::BNE:
1677 case RISCV::BLT:
1678 case RISCV::BGE:
1679 case RISCV::BLTU:
1680 case RISCV::BGEU:
1681 case RISCV::BEQI:
1682 case RISCV::BNEI:
1683 case RISCV::CV_BEQIMM:
1684 case RISCV::CV_BNEIMM:
1685 case RISCV::QC_BEQI:
1686 case RISCV::QC_BNEI:
1687 case RISCV::QC_BGEI:
1688 case RISCV::QC_BLTI:
1689 case RISCV::QC_BLTUI:
1690 case RISCV::QC_BGEUI:
1691 case RISCV::QC_E_BEQI:
1692 case RISCV::QC_E_BNEI:
1693 case RISCV::QC_E_BGEI:
1694 case RISCV::QC_E_BLTI:
1695 case RISCV::QC_E_BLTUI:
1696 case RISCV::QC_E_BGEUI:
1698 case RISCV::JAL:
1699 case RISCV::PseudoBR:
1701 case RISCV::PseudoJump:
1703 }
1704}
1705
1706
1707
1708
1710
1711 switch (Opcode) {
1712 case RISCV::ADD: return RISCV::PseudoCCADD;
1713 case RISCV::SUB: return RISCV::PseudoCCSUB;
1714 case RISCV::SLL: return RISCV::PseudoCCSLL;
1715 case RISCV::SRL: return RISCV::PseudoCCSRL;
1716 case RISCV::SRA: return RISCV::PseudoCCSRA;
1717 case RISCV::AND: return RISCV::PseudoCCAND;
1718 case RISCV::OR: return RISCV::PseudoCCOR;
1719 case RISCV::XOR: return RISCV::PseudoCCXOR;
1720 case RISCV::MAX: return RISCV::PseudoCCMAX;
1721 case RISCV::MAXU: return RISCV::PseudoCCMAXU;
1722 case RISCV::MIN: return RISCV::PseudoCCMIN;
1723 case RISCV::MINU: return RISCV::PseudoCCMINU;
1724 case RISCV::MUL: return RISCV::PseudoCCMUL;
1725 case RISCV::LUI: return RISCV::PseudoCCLUI;
1726 case RISCV::QC_LI: return RISCV::PseudoCCQC_LI;
1727 case RISCV::QC_E_LI: return RISCV::PseudoCCQC_E_LI;
1728
1729 case RISCV::ADDI: return RISCV::PseudoCCADDI;
1730 case RISCV::SLLI: return RISCV::PseudoCCSLLI;
1731 case RISCV::SRLI: return RISCV::PseudoCCSRLI;
1732 case RISCV::SRAI: return RISCV::PseudoCCSRAI;
1733 case RISCV::ANDI: return RISCV::PseudoCCANDI;
1734 case RISCV::ORI: return RISCV::PseudoCCORI;
1735 case RISCV::XORI: return RISCV::PseudoCCXORI;
1736
1737 case RISCV::ADDW: return RISCV::PseudoCCADDW;
1738 case RISCV::SUBW: return RISCV::PseudoCCSUBW;
1739 case RISCV::SLLW: return RISCV::PseudoCCSLLW;
1740 case RISCV::SRLW: return RISCV::PseudoCCSRLW;
1741 case RISCV::SRAW: return RISCV::PseudoCCSRAW;
1742
1743 case RISCV::ADDIW: return RISCV::PseudoCCADDIW;
1744 case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;
1745 case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;
1746 case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;
1747
1748 case RISCV::ANDN: return RISCV::PseudoCCANDN;
1749 case RISCV::ORN: return RISCV::PseudoCCORN;
1750 case RISCV::XNOR: return RISCV::PseudoCCXNOR;
1751
1752 case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS;
1753 case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ;
1754 }
1755
1756
1757 return RISCV::INSTRUCTION_LIST_END;
1758}
1759
1760
1761
1766 if (.isVirtual())
1767 return nullptr;
1768 if (.hasOneNonDBGUse(Reg))
1769 return nullptr;
1771 if ()
1772 return nullptr;
1773
1774 if (!STI.hasShortForwardBranchIMinMax() &&
1775 (MI->getOpcode() == RISCV::MAX || MI->getOpcode() == RISCV::MIN ||
1776 MI->getOpcode() == RISCV::MINU || MI->getOpcode() == RISCV::MAXU))
1777 return nullptr;
1778
1779 if (!STI.hasShortForwardBranchIMul() && MI->getOpcode() == RISCV::MUL)
1780 return nullptr;
1781
1782
1784 return nullptr;
1785
1786 if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
1787 MI->getOperand(1).getReg() == RISCV::X0)
1788 return nullptr;
1789
1791
1792 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1793 return nullptr;
1794 if (!MO.isReg())
1795 continue;
1796
1797 if (MO.isTied())
1798 return nullptr;
1799 if (MO.isDef())
1800 return nullptr;
1801
1802 if (MO.getReg().isPhysical() && .isConstantPhysReg(MO.getReg()))
1803 return nullptr;
1804 }
1805 bool DontMoveAcrossStores = true;
1806 if (->isSafeToMove(DontMoveAcrossStores))
1807 return nullptr;
1808 return MI;
1809}
1810
1813 unsigned &TrueOp, unsigned &FalseOp,
1814 bool &Optimizable) const {
1815 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1816 "Unknown select instruction");
1817
1818
1819
1820
1821
1822
1823
1824 TrueOp = 5;
1825 FalseOp = 4;
1826 Cond.push_back(MI.getOperand(1));
1827 Cond.push_back(MI.getOperand(2));
1828 Cond.push_back(MI.getOperand(3));
1829
1830 Optimizable = STI.hasShortForwardBranchIALU();
1831 return false;
1832}
1833
1837 bool PreferFalse) const {
1838 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1839 "Unknown select instruction");
1840 if (.hasShortForwardBranchIALU())
1841 return nullptr;
1842
1846 bool Invert = ;
1850 return nullptr;
1851
1852
1854 Register DestReg = MI.getOperand(0).getReg();
1856 if (.constrainRegClass(DestReg, PreviousClass))
1857 return nullptr;
1858
1860 assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
1861
1862
1864 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);
1865
1866
1867 NewMI.add(MI.getOperand(1));
1868 NewMI.add(MI.getOperand(2));
1869
1870
1872 if (Invert)
1875
1876
1877 NewMI.add(FalseReg);
1878
1879
1881 for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
1882 NewMI.add(DefMI->getOperand(i));
1883
1884
1885 SeenMIs.insert(NewMI);
1887
1888
1889
1890
1891
1892 if (DefMI->getParent() != MI.getParent())
1894
1895
1896 DefMI->eraseFromParent();
1897 return NewMI;
1898}
1899
1901 if (MI.isMetaInstruction())
1902 return 0;
1903
1904 unsigned Opcode = MI.getOpcode();
1905
1906 if (Opcode == TargetOpcode::INLINEASM ||
1907 Opcode == TargetOpcode::INLINEASM_BR) {
1909 return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
1911 }
1912
1913 if (.memoperands_empty()) {
1916 if (STI.hasStdExtZca()) {
1917 if (isCompressibleInst(MI, STI))
1918 return 4;
1919 return 6;
1920 }
1921 return 8;
1922 }
1923 }
1924
1925 if (Opcode == TargetOpcode::BUNDLE)
1926 return getInstBundleLength(MI);
1927
1928 if (MI.getParent() && MI.getParent()->getParent()) {
1929 if (isCompressibleInst(MI, STI))
1930 return 2;
1931 }
1932
1933 switch (Opcode) {
1934 case RISCV::PseudoMV_FPR16INX:
1935 case RISCV::PseudoMV_FPR32INX:
1936
1937 return STI.hasStdExtZca() ? 2 : 4;
1938 case TargetOpcode::STACKMAP:
1939
1941 case TargetOpcode::PATCHPOINT:
1942
1944 case TargetOpcode::STATEPOINT: {
1945
1947
1948 return std::max(NumBytes, 8U);
1949 }
1950 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
1951 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
1952 case TargetOpcode::PATCHABLE_TAIL_CALL: {
1955 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
1956 F.hasFnAttribute("patchable-function-entry")) {
1957 unsigned Num;
1958 if (F.getFnAttribute("patchable-function-entry")
1959 .getValueAsString()
1960 .getAsInteger(10, Num))
1961 return get(Opcode).getSize();
1962
1963
1964 return (STI.hasStdExtZca() ? 2 : 4) * Num;
1965 }
1966
1967
1968 return STI.is64Bit() ? 68 : 44;
1969 }
1970 default:
1971 return get(Opcode).getSize();
1972 }
1973}
1974
1975unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
1976 unsigned Size = 0;
1979 while (++I != E && I->isInsideBundle()) {
1980 assert(->isBundle() && "No nested bundle!");
1982 }
1983 return Size;
1984}
1985
1987 const unsigned Opcode = MI.getOpcode();
1988 switch (Opcode) {
1989 default:
1990 break;
1991 case RISCV::FSGNJ_D:
1992 case RISCV::FSGNJ_S:
1993 case RISCV::FSGNJ_H:
1994 case RISCV::FSGNJ_D_INX:
1995 case RISCV::FSGNJ_D_IN32X:
1996 case RISCV::FSGNJ_S_INX:
1997 case RISCV::FSGNJ_H_INX:
1998
1999 return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
2000 MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
2001 case RISCV::ADDI:
2002 case RISCV::ORI:
2003 case RISCV::XORI:
2004 return (MI.getOperand(1).isReg() &&
2005 MI.getOperand(1).getReg() == RISCV::X0) ||
2006 (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
2007 }
2008 return MI.isAsCheapAsAMove();
2009}
2010
2011std::optional
2013 if (MI.isMoveReg())
2015 switch (MI.getOpcode()) {
2016 default:
2017 break;
2018 case RISCV::ADD:
2019 case RISCV::OR:
2020 case RISCV::XOR:
2021 if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
2022 MI.getOperand(2).isReg())
2024 if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
2025 MI.getOperand(1).isReg())
2027 break;
2028 case RISCV::ADDI:
2029
2030 if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
2031 MI.getOperand(2).getImm() == 0)
2033 break;
2034 case RISCV::SUB:
2035 if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
2036 MI.getOperand(1).isReg())
2038 break;
2039 case RISCV::SH1ADD:
2040 case RISCV::SH1ADD_UW:
2041 case RISCV::SH2ADD:
2042 case RISCV::SH2ADD_UW:
2043 case RISCV::SH3ADD:
2044 case RISCV::SH3ADD_UW:
2045 if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
2046 MI.getOperand(2).isReg())
2048 break;
2049 case RISCV::FSGNJ_D:
2050 case RISCV::FSGNJ_S:
2051 case RISCV::FSGNJ_H:
2052 case RISCV::FSGNJ_D_INX:
2053 case RISCV::FSGNJ_D_IN32X:
2054 case RISCV::FSGNJ_S_INX:
2055 case RISCV::FSGNJ_H_INX:
2056
2057 if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
2058 MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
2060 break;
2061 }
2062 return std::nullopt;
2063}
2064
2067
2068
2069
2070 const auto &SchedModel = STI.getSchedModel();
2071 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2074 }
2075
2077}
2078
2082 int16_t FrmOpIdx =
2083 RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
2084 if (FrmOpIdx < 0) {
2087 return RISCV::getNamedOperandIdx(MI->getOpcode(),
2088 RISCV::OpName::frm) < 0;
2089 }) &&
2090 "New instructions require FRM whereas the old one does not have it");
2091 return;
2092 }
2093
2096
2097 for (auto *NewMI : InsInstrs) {
2098
2099 if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
2100 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2101 continue;
2103 MIB.add(FRM);
2106 }
2107}
2108
2110 switch (Opc) {
2111 default:
2112 return false;
2113 case RISCV::FADD_H:
2114 case RISCV::FADD_S:
2115 case RISCV::FADD_D:
2116 return true;
2117 }
2118}
2119
2121 switch (Opc) {
2122 default:
2123 return false;
2124 case RISCV::FSUB_H:
2125 case RISCV::FSUB_S:
2126 case RISCV::FSUB_D:
2127 return true;
2128 }
2129}
2130
2132 switch (Opc) {
2133 default:
2134 return false;
2135 case RISCV::FMUL_H:
2136 case RISCV::FMUL_S:
2137 case RISCV::FMUL_D:
2138 return true;
2139 }
2140}
2141
2142bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
2143 bool Invert) const {
2144#define OPCODE_LMUL_CASE(OPC) \
2145 case RISCV::OPC##_M1: \
2146 case RISCV::OPC##_M2: \
2147 case RISCV::OPC##_M4: \
2148 case RISCV::OPC##_M8: \
2149 case RISCV::OPC##_MF2: \
2150 case RISCV::OPC##_MF4: \
2151 case RISCV::OPC##_MF8
2152
2153#define OPCODE_LMUL_MASK_CASE(OPC) \
2154 case RISCV::OPC##_M1_MASK: \
2155 case RISCV::OPC##_M2_MASK: \
2156 case RISCV::OPC##_M4_MASK: \
2157 case RISCV::OPC##_M8_MASK: \
2158 case RISCV::OPC##_MF2_MASK: \
2159 case RISCV::OPC##_MF4_MASK: \
2160 case RISCV::OPC##_MF8_MASK
2161
2162 unsigned Opcode = Inst.getOpcode();
2163 if (Invert) {
2165 Opcode = *InvOpcode;
2166 else
2167 return false;
2168 }
2169
2170
2171 switch (Opcode) {
2172 default:
2173 return false;
2178 return true;
2179 }
2180
2181
2182#undef OPCODE_LMUL_MASK_CASE
2183#undef OPCODE_LMUL_CASE
2184}
2185
2186bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
2189 return false;
2190
2193 const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
2194
2195
2197 const uint64_t TSFlags = Desc.TSFlags;
2198
2199 auto checkImmOperand = [&](unsigned OpIdx) {
2201 };
2202
2203 auto checkRegOperand = [&](unsigned OpIdx) {
2205 };
2206
2207
2208
2209
2210
2211 if (!checkRegOperand(1))
2212 return false;
2213
2214
2217 return false;
2218
2219
2221 const MachineBasicBlock *MBB = Root.getParent();
2225
2226 bool SeenMI2 = false;
2227 for (auto End = MBB->rend(), It = It1; It != End; ++It) {
2228 if (It == It2) {
2229 SeenMI2 = true;
2231
2232
2233 break;
2234 }
2235
2236 if (It->modifiesRegister(RISCV::V0, TRI)) {
2237 Register SrcReg = It->getOperand(1).getReg();
2238
2239
2241 return false;
2242
2243 if (!MI1VReg.isValid()) {
2244
2245 MI1VReg = SrcReg;
2246 continue;
2247 }
2248
2249
2250 if (!SeenMI2)
2251 continue;
2252
2253
2254
2255 if (MI1VReg != SrcReg)
2256 return false;
2257 else
2258 break;
2259 }
2260 }
2261
2262
2263
2264 assert(SeenMI2 && "Prev is expected to appear before Root");
2265 }
2266
2267
2270 return false;
2271
2272
2278 return false;
2279 switch (Op1.getType()) {
2282 return false;
2283 break;
2286 return false;
2287 break;
2288 default:
2290 }
2291 }
2292
2293
2296 return false;
2297
2298 return true;
2299}
2300
2301
2302
2303bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
2304 bool &Commuted) const {
2305 const MachineBasicBlock *MBB = Inst.getParent();
2308 "Expect the present of passthrough operand.");
2311
2312
2313
2314 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2315 areRVVInstsReassociable(Inst, *MI2);
2316 if (Commuted)
2318
2319 return areRVVInstsReassociable(Inst, *MI1) &&
2320 (isVectorAssociativeAndCommutative(*MI1) ||
2321 isVectorAssociativeAndCommutative(*MI1, true)) &&
2324}
2325
2328 if (!isVectorAssociativeAndCommutative(Inst) &&
2329 !isVectorAssociativeAndCommutative(Inst, true))
2331
2335
2336
2337
2341 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
2343 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
2344
2345
2347}
2348
2354
2355 for (unsigned I = 0; I < 5; ++I)
2357 }
2358}
2359
2361 bool &Commuted) const {
2362 if (isVectorAssociativeAndCommutative(Inst) ||
2363 isVectorAssociativeAndCommutative(Inst, true))
2364 return hasReassociableVectorSibling(Inst, Commuted);
2365
2367 return false;
2368
2370 unsigned OperandIdx = Commuted ? 2 : 1;
2373
2374 int16_t InstFrmOpIdx =
2375 RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
2376 int16_t SiblingFrmOpIdx =
2377 RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
2378
2379 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2381}
2382
2384 bool Invert) const {
2385 if (isVectorAssociativeAndCommutative(Inst, Invert))
2386 return true;
2387
2389 if (Invert) {
2391 if (!InverseOpcode)
2392 return false;
2393 Opc = *InverseOpcode;
2394 }
2395
2399
2400 switch (Opc) {
2401 default:
2402 return false;
2403 case RISCV::ADD:
2404 case RISCV::ADDW:
2405 case RISCV::AND:
2406 case RISCV::OR:
2407 case RISCV::XOR:
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420 case RISCV::MUL:
2421 case RISCV::MULW:
2422 case RISCV::MIN:
2423 case RISCV::MINU:
2424 case RISCV::MAX:
2425 case RISCV::MAXU:
2426 case RISCV::FMIN_H:
2427 case RISCV::FMIN_S:
2428 case RISCV::FMIN_D:
2429 case RISCV::FMAX_H:
2430 case RISCV::FMAX_S:
2431 case RISCV::FMAX_D:
2432 return true;
2433 }
2434
2435 return false;
2436}
2437
2438std::optional
2440#define RVV_OPC_LMUL_CASE(OPC, INV) \
2441 case RISCV::OPC##_M1: \
2442 return RISCV::INV##_M1; \
2443 case RISCV::OPC##_M2: \
2444 return RISCV::INV##_M2; \
2445 case RISCV::OPC##_M4: \
2446 return RISCV::INV##_M4; \
2447 case RISCV::OPC##_M8: \
2448 return RISCV::INV##_M8; \
2449 case RISCV::OPC##_MF2: \
2450 return RISCV::INV##_MF2; \
2451 case RISCV::OPC##_MF4: \
2452 return RISCV::INV##_MF4; \
2453 case RISCV::OPC##_MF8: \
2454 return RISCV::INV##_MF8
2455
2456#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2457 case RISCV::OPC##_M1_MASK: \
2458 return RISCV::INV##_M1_MASK; \
2459 case RISCV::OPC##_M2_MASK: \
2460 return RISCV::INV##_M2_MASK; \
2461 case RISCV::OPC##_M4_MASK: \
2462 return RISCV::INV##_M4_MASK; \
2463 case RISCV::OPC##_M8_MASK: \
2464 return RISCV::INV##_M8_MASK; \
2465 case RISCV::OPC##_MF2_MASK: \
2466 return RISCV::INV##_MF2_MASK; \
2467 case RISCV::OPC##_MF4_MASK: \
2468 return RISCV::INV##_MF4_MASK; \
2469 case RISCV::OPC##_MF8_MASK: \
2470 return RISCV::INV##_MF8_MASK
2471
2472 switch (Opcode) {
2473 default:
2474 return std::nullopt;
2475 case RISCV::FADD_H:
2476 return RISCV::FSUB_H;
2477 case RISCV::FADD_S:
2478 return RISCV::FSUB_S;
2479 case RISCV::FADD_D:
2480 return RISCV::FSUB_D;
2481 case RISCV::FSUB_H:
2482 return RISCV::FADD_H;
2483 case RISCV::FSUB_S:
2484 return RISCV::FADD_S;
2485 case RISCV::FSUB_D:
2486 return RISCV::FADD_D;
2487 case RISCV::ADD:
2488 return RISCV::SUB;
2489 case RISCV::SUB:
2490 return RISCV::ADD;
2491 case RISCV::ADDW:
2492 return RISCV::SUBW;
2493 case RISCV::SUBW:
2494 return RISCV::ADDW;
2495
2500
2501 }
2502
2503#undef RVV_OPC_LMUL_MASK_CASE
2504#undef RVV_OPC_LMUL_CASE
2505}
2506
2509 bool DoRegPressureReduce) {
2511 return false;
2514 if ( ||
(MI->getOpcode()))
2515 return false;
2516
2519 return false;
2520
2521
2522
2523
2524
2525 if (DoRegPressureReduce && .hasOneNonDBGUse(MI->getOperand(0).getReg()))
2526 return false;
2527
2528
2529 if (Root.getParent() != MI->getParent())
2530 return false;
2532}
2533
2536 bool DoRegPressureReduce) {
2540 return false;
2541 bool Added = false;
2543 DoRegPressureReduce)) {
2546 Added = true;
2547 }
2549 DoRegPressureReduce)) {
2552 Added = true;
2553 }
2554 return Added;
2555}
2556
2559 bool DoRegPressureReduce) {
2561}
2562
2563
2564
2567 unsigned CombineOpc) {
2570
2573
2574 if ( || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
2575 return nullptr;
2576
2577 if (.hasOneNonDBGUse(MI->getOperand(0).getReg()))
2578 return nullptr;
2579
2580 return MI;
2581}
2582
2583
2584
2585
2588 unsigned OuterShiftAmt) {
2590 if (!ShiftMI)
2591 return false;
2592
2594 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2595 return false;
2596
2597 return true;
2598}
2599
2600
2601
2603 switch (Opc) {
2604 default:
2605 return 0;
2606 case RISCV::SH1ADD:
2607 return 1;
2608 case RISCV::SH2ADD:
2609 return 2;
2610 case RISCV::SH3ADD:
2611 return 3;
2612 }
2613}
2614
2615
2616
2618 switch (Opc) {
2619 default:
2620 return 0;
2621 case RISCV::SH1ADD_UW:
2622 return 1;
2623 case RISCV::SH2ADD_UW:
2624 return 2;
2625 case RISCV::SH3ADD_UW:
2626 return 3;
2627 }
2628}
2629
2630
2631
2635 if (!ShiftAmt)
2636 return false;
2637
2639
2641 if (!AddMI)
2642 return false;
2643
2644 bool Found = false;
2647 Found = true;
2648 }
2651 Found = true;
2652 }
2653
2654 return Found;
2655}
2656
2664 default:
2666 }
2667}
2668
2671 bool DoRegPressureReduce) const {
2672
2673 if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
2674 return true;
2675
2677 return true;
2678
2680 DoRegPressureReduce);
2681}
2682
2684 switch (RootOpc) {
2685 default:
2687 case RISCV::FADD_H:
2688 return RISCV::FMADD_H;
2689 case RISCV::FADD_S:
2690 return RISCV::FMADD_S;
2691 case RISCV::FADD_D:
2692 return RISCV::FMADD_D;
2693 case RISCV::FSUB_H:
2695 : RISCV::FNMSUB_H;
2696 case RISCV::FSUB_S:
2698 : RISCV::FNMSUB_S;
2699 case RISCV::FSUB_D:
2701 : RISCV::FNMSUB_D;
2702 }
2703}
2704
2707 default:
2711 return 2;
2714 return 1;
2715 }
2716}
2717
2725
2730
2731 Register DstReg = Dst.getReg();
2736
2737 bool Mul1IsKill = Mul1.isKill();
2738 bool Mul2IsKill = Mul2.isKill();
2739 bool AddendIsKill = Addend.isKill();
2740
2741
2742
2743
2744 MRI.clearKillFlags(Mul1.getReg());
2745 MRI.clearKillFlags(Mul2.getReg());
2746
2748 BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
2753
2758}
2759
2760
2761
2762
2763static void
2771
2773 assert(OuterShiftAmt != 0 && "Unexpected opcode");
2774
2778
2780 assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");
2781
2782 unsigned InnerOpc;
2783 switch (InnerShiftAmt - OuterShiftAmt) {
2784 default:
2786 case 0:
2787 InnerOpc = RISCV::ADD;
2788 break;
2789 case 1:
2790 InnerOpc = RISCV::SH1ADD;
2791 break;
2792 case 2:
2793 InnerOpc = RISCV::SH2ADD;
2794 break;
2795 case 3:
2796 InnerOpc = RISCV::SH3ADD;
2797 break;
2798 }
2799
2803
2804 Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2805
2813
2814 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
2820}
2821
2829 default:
2831 DelInstrs, InstrIdxForVirtReg);
2832 return;
2837 return;
2838 }
2843 return;
2844 }
2846 genShXAddAddShift(Root, 1, InsInstrs, DelInstrs, InstrIdxForVirtReg);
2847 return;
2849 genShXAddAddShift(Root, 2, InsInstrs, DelInstrs, InstrIdxForVirtReg);
2850 return;
2851 }
2852}
2853
2857
2858 for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
2860 unsigned OpType = Operand.OperandType;
2861 switch (OpType) {
2862 default:
2865 if (!MO.isImm()) {
2866 ErrInfo = "Expected an immediate operand.";
2867 return false;
2868 }
2869 int64_t Imm = MO.getImm();
2870 bool Ok;
2871 switch (OpType) {
2872 default:
2874
2875
2876#define CASE_OPERAND_UIMM(NUM) \
2877 case RISCVOp::OPERAND_UIMM##NUM: \
2878 Ok = isUInt(Imm); \
2879 break;
2880#define CASE_OPERAND_SIMM(NUM) \
2881 case RISCVOp::OPERAND_SIMM##NUM: \
2882 Ok = isInt(Imm); \
2883 break;
2899
2902 break;
2905 break;
2907 Ok = isUInt<5>(Imm) && (Imm != 0);
2908 break;
2910 Ok = isUInt<5>(Imm) && (Imm > 3);
2911 break;
2913 Ok = Imm >= 1 && Imm <= 32;
2914 break;
2917 break;
2920 break;
2923 break;
2926 break;
2929 break;
2931 Ok = isUInt<8>(Imm) && Imm >= 32;
2932 break;
2935 break;
2938 break;
2941 break;
2944 break;
2947 break;
2949 Ok = Imm == 3;
2950 break;
2952 Ok = Imm == 4;
2953 break;
2955 Ok = (isUInt<5>(Imm) && Imm != 0) || Imm == -1;
2956 break;
2957
2963
2965 Ok = Imm >= -15 && Imm <= 16;
2966 break;
2968 Ok = isInt<5>(Imm) && (Imm != 0);
2969 break;
2971 Ok = Imm != 0 && isInt<6>(Imm);
2972 break;
2975 break;
2978 break;
2981 break;
2983 Ok = isInt<16>(Imm) && (Imm != 0);
2984 break;
2987 break;
2990 break;
2993 Ok = Ok && Imm != 0;
2994 break;
2996 Ok = (isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
2997 break;
2999 Ok = Imm >= 0 && Imm <= 10;
3000 break;
3002 Ok = Imm >= 0 && Imm <= 7;
3003 break;
3005 Ok = Imm >= 1 && Imm <= 10;
3006 break;
3008 Ok = Imm >= 2 && Imm <= 14;
3009 break;
3012 break;
3015 break;
3017 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
3018 break;
3021 break;
3024 break;
3027 break;
3030 Imm;
3031 break;
3034 break;
3036 Ok = Imm == 0;
3037 break;
3042 else
3044 break;
3047 break;
3048 }
3049 if (!Ok) {
3050 ErrInfo = "Invalid immediate";
3051 return false;
3052 }
3053 }
3054 break;
3056
3057
3058 if (MO.isReg()) {
3059 ErrInfo = "Expected a non-register operand.";
3060 return false;
3061 }
3063 ErrInfo = "Invalid immediate";
3064 return false;
3065 }
3066 break;
3069
3070
3071 if (MO.isReg()) {
3072 ErrInfo = "Expected a non-register operand.";
3073 return false;
3074 }
3076 ErrInfo = "Invalid immediate";
3077 return false;
3078 }
3079 break;
3081
3082
3083 if (MO.isReg()) {
3084 ErrInfo = "Expected a non-register operand.";
3085 return false;
3086 }
3088 ErrInfo = "Invalid immediate";
3089 return false;
3090 }
3091 break;
3093 if (MO.isImm()) {
3094 int64_t Imm = MO.getImm();
3095
3096 if ((Imm) && Imm != -1) {
3097 ErrInfo = "Invalid immediate";
3098 return false;
3099 }
3100 } else if (!MO.isReg()) {
3101 ErrInfo = "Expected a register or immediate operand.";
3102 return false;
3103 }
3104 break;
3105 }
3106 }
3107
3111 if (.isImm() &&
.isReg()) {
3112 ErrInfo = "Invalid operand type for VL operand";
3113 return false;
3114 }
3115 if (Op.isReg() && Op.getReg().isValid()) {
3117 auto *RC = MRI.getRegClass(Op.getReg());
3118 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
3119 ErrInfo = "Invalid register class for VL operand";
3120 return false;
3121 }
3122 }
3124 ErrInfo = "VL operand w/o SEW operand?";
3125 return false;
3126 }
3127 }
3130 if (.getOperand(OpIdx).isImm()) {
3131 ErrInfo = "SEW value expected to be an immediate";
3132 return false;
3133 }
3135 if (Log2SEW > 31) {
3136 ErrInfo = "Unexpected SEW value";
3137 return false;
3138 }
3139 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3141 ErrInfo = "Unexpected SEW value";
3142 return false;
3143 }
3144 }
3147 if (.getOperand(OpIdx).isImm()) {
3148 ErrInfo = "Policy operand expected to be an immediate";
3149 return false;
3150 }
3153 ErrInfo = "Invalid Policy Value";
3154 return false;
3155 }
3157 ErrInfo = "policy operand w/o VL operand?";
3158 return false;
3159 }
3160
3161
3162
3163
3164 unsigned UseOpIdx;
3165 if (.isRegTiedToUseOperand(0, &UseOpIdx)) {
3166 ErrInfo = "policy operand w/o tied operand?";
3167 return false;
3168 }
3169 }
3170
3173 .readsRegister(RISCV::FRM, nullptr)) {
3174 ErrInfo = "dynamic rounding mode should read FRM";
3175 return false;
3176 }
3177
3178 return true;
3179}
3180
3185 default:
3186 return false;
3187 case RISCV::LB:
3188 case RISCV::LBU:
3189 case RISCV::LH:
3190 case RISCV::LH_INX:
3191 case RISCV::LHU:
3192 case RISCV::LW:
3193 case RISCV::LW_INX:
3194 case RISCV::LWU:
3195 case RISCV::LD:
3196 case RISCV::LD_RV32:
3197 case RISCV::FLH:
3198 case RISCV::FLW:
3199 case RISCV::FLD:
3200 case RISCV::SB:
3201 case RISCV::SH:
3202 case RISCV::SH_INX:
3203 case RISCV::SW:
3204 case RISCV::SW_INX:
3205 case RISCV::SD:
3206 case RISCV::SD_RV32:
3207 case RISCV::FSH:
3208 case RISCV::FSW:
3209 case RISCV::FSD:
3210 break;
3211 }
3212
3214 return false;
3215
3218 return false;
3219
3222 int64_t NewOffset = OldOffset + Disp;
3223 if (.is64Bit())
3225
3227 return false;
3228
3234 return true;
3235}
3236
3239
3242
3244 "Addressing mode not supported for folding");
3245
3253}
3254
3255
3256
3257
3259 switch (Opc) {
3260 default:
3261 return false;
3262 case RISCV::SW:
3263 case RISCV::SD:
3264 case RISCV::LD:
3265 case RISCV::LW:
3266 return true;
3267 }
3268}
3269
3272
3274 return false;
3275
3277 return true;
3278
3280
3281
3283 return false;
3284
3286 return false;
3287
3288 return true;
3289}
3290
3296 return false;
3297
3298
3300 case RISCV::LB:
3301 case RISCV::LBU:
3302 case RISCV::SB:
3303 case RISCV::LH:
3304 case RISCV::LH_INX:
3305 case RISCV::LHU:
3306 case RISCV::FLH:
3307 case RISCV::SH:
3308 case RISCV::SH_INX:
3309 case RISCV::FSH:
3310 case RISCV::LW:
3311 case RISCV::LW_INX:
3312 case RISCV::LWU:
3313 case RISCV::FLW:
3314 case RISCV::SW:
3315 case RISCV::SW_INX:
3316 case RISCV::FSW:
3317 case RISCV::LD:
3318 case RISCV::LD_RV32:
3319 case RISCV::FLD:
3320 case RISCV::SD:
3321 case RISCV::SD_RV32:
3322 case RISCV::FSD:
3323 break;
3324 default:
3325 return false;
3326 }
3328 OffsetIsScalable = false;
3330 return false;
3332 return true;
3333}
3334
3335
3336
3341
3342
3343
3344 if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
3345 return true;
3346
3348 return false;
3349
3352 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3353 return false;
3354
3355 auto Base1 = MO1->getValue();
3356 auto Base2 = MO2->getValue();
3357 if (!Base1 || !Base2)
3358 return false;
3361
3363 return false;
3364
3365 return Base1 == Base2;
3366}
3367
3371 int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
3372 unsigned NumBytes) const {
3373
3374
3375 if (!BaseOps1.empty() && !BaseOps2.empty()) {
3377 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
3379 return false;
3380 } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
3381
3382 return false;
3383 }
3384
3386 BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3387
3389
3390
3391
3392 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
3393}
3394
3395
3396
3397
3398
3399
3400
3401
3406 return false;
3407
3408
3409
3410
3412 return false;
3415 return false;
3416
3418 return false;
3419
3423 return true;
3424}
3425
3430
3433 return false;
3434
3435
3436
3437
3438
3439
3441 const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
3442 int64_t OffsetA = 0, OffsetB = 0;
3448 int LowOffset = std::min(OffsetA, OffsetB);
3449 int HighOffset = std::max(OffsetA, OffsetB);
3450 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3452 LowOffset + (int)LowWidth.getValue() <= HighOffset)
3453 return true;
3454 }
3455 }
3456 return false;
3457}
3458
3459std::pair<unsigned, unsigned>
3462 return std::make_pair(TF & Mask, TF & ~Mask);
3463}
3464
3467 using namespace RISCVII;
3468 static const std::pair<unsigned, const char *> TargetFlags[] = {
3469 {MO_CALL, "riscv-call"},
3470 {MO_LO, "riscv-lo"},
3471 {MO_HI, "riscv-hi"},
3472 {MO_PCREL_LO, "riscv-pcrel-lo"},
3473 {MO_PCREL_HI, "riscv-pcrel-hi"},
3474 {MO_GOT_HI, "riscv-got-hi"},
3475 {MO_TPREL_LO, "riscv-tprel-lo"},
3476 {MO_TPREL_HI, "riscv-tprel-hi"},
3477 {MO_TPREL_ADD, "riscv-tprel-add"},
3478 {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
3479 {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
3480 {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
3481 {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
3482 {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
3483 {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
3484 return ArrayRef(TargetFlags);
3485}
3487 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
3489
3490
3491 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
3492 return false;
3493
3494
3495
3496 if (F.hasSection())
3497 return false;
3498
3499
3500 return true;
3501}
3502
3504 unsigned &Flags) const {
3505
3507}
3508
3509
3514
3519
3523 return F.getFnAttribute("fentry-call").getValueAsBool() ||
3524 F.hasFnAttribute("patchable-function-entry");
3525}
3526
3529 return MI.readsRegister(RegNo, TRI) ||
3530 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3531}
3532
3535 return MI.modifiesRegister(RegNo, TRI) ||
3536 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3537}
3538
3540 if (.back().isReturn())
3541 return true;
3543 return true;
3544
3545
3546
3547
3553 return true;
3555 break;
3556 }
3557 return false;
3558}
3559
3561
3562
3563 if (C.back().isReturn()) {
3565 "The candidate who uses return instruction must be outlined "
3566 "using tail call");
3567 return false;
3568 }
3569
3570
3571
3575 }))
3576 return true;
3577
3578 return .isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
3579}
3580
3581std::optional<std::unique_ptroutliner::OutlinedFunction>
3584 std::vectoroutliner::Candidate &RepeatedSequenceLocs,
3585 unsigned MinRepeats) const {
3586
3587
3589
3590
3591 if (RepeatedSequenceLocs.size() < MinRepeats)
3592 return std::nullopt;
3593
3594
3596 unsigned InstrSizeCExt =
3598 unsigned CallOverhead = 0, FrameOverhead = 0;
3599
3600
3601 unsigned CFICount = 0;
3602 for (auto &I : Candidate) {
3603 if (I.isCFIInstruction())
3604 CFICount++;
3605 }
3606
3607
3608
3609
3610
3611
3612
3614 std::vector CFIInstructions =
3615 C.getMF()->getFrameInstructions();
3616
3617 if (CFICount > 0 && CFICount != CFIInstructions.size())
3618 return std::nullopt;
3619 }
3620
3624
3625
3626 CallOverhead = 4 + InstrSizeCExt;
3627
3628 FrameOverhead = 0;
3629 } else {
3630
3631 CallOverhead = 8;
3632
3633 FrameOverhead = InstrSizeCExt;
3634 }
3635
3636
3637
3639 return std::nullopt;
3640
3641 for (auto &C : RepeatedSequenceLocs)
3642 C.setCallInfo(MOCI, CallOverhead);
3643
3644 unsigned SequenceSize = 0;
3645 for (auto &MI : Candidate)
3647
3648 return std::make_uniqueoutliner::OutlinedFunction(
3649 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3650}
3651
3655 unsigned Flags) const {
3659 MBB->getParent()->getSubtarget().getRegisterInfo();
3660 const auto &F = MI.getMF()->getFunction();
3661
3662
3663
3664
3665 if (MI.isCFIInstruction())
3667
3671
3672
3673 for (const auto &MO : MI.operands()) {
3674
3675
3676
3678 (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
3679 F.hasSection() || F.getSectionPrefix()))
3681 }
3682
3683 if (isLPAD(MI))
3685
3687}
3688
3692
3694 return;
3695
3696 MBB.addLiveIn(RISCV::X5);
3697
3698
3701 .addReg(RISCV::X5)
3702 .addImm(0));
3703}
3704
3708
3711 .addGlobalAddress(M.getNamedValue(MF.getName()),
3713 return It;
3714 }
3715
3716
3717 It = MBB.insert(It,
3719 .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
3721 return It;
3722}
3723
3726
3727
3729 if (!Op0.isReg() || Reg != Op0.getReg())
3730 return std::nullopt;
3731
3732
3733
3734 if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
3735 MI.getOperand(2).isImm())
3736 return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};
3737
3738 return std::nullopt;
3739}
3740
3741
3745
3746 std::string GenericComment =
3748 if (!GenericComment.empty())
3749 return GenericComment;
3750
3751
3752 if (.isImm())
3753 return std::string();
3754
3756 if (OpIdx >= Desc.getNumOperands())
3757 return std::string();
3758
3759 std::string Comment;
3761
3763
3764
3765
3766 switch (OpInfo.OperandType) {
3769 unsigned Imm = Op.getImm();
3771 break;
3772 }
3774 unsigned Imm = Op.getImm();
3776 break;
3777 }
3780 unsigned Log2SEW = Op.getImm();
3781 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3783 OS << "e" << SEW;
3784 break;
3785 }
3787 unsigned Policy = Op.getImm();
3789 "Invalid Policy Value");
3792 break;
3793 }
3794
3795 return Comment;
3796}
3797
3798
// Helper macros that expand one RVV pseudo mnemonic into switch-case labels
// covering every LMUL variant, in unmasked and/or masked form. Each expansion
// deliberately omits the leading `case` keyword so the macros compose (the
// call site or enclosing macro supplies it).
// NOTE(review): the column-0 lines in this listing carried fused line-number
// artifacts from HTML extraction; the macros below restore the clean tokens.
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL##_MASK

// Unmasked + masked labels for one specific LMUL.
#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

// Unmasked labels for the LMULs valid for widening ops (MF8..M4, no M8).
#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

// All unmasked LMULs (widening set plus M8).
#define CASE_RVV_OPCODE_UNMASK(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

// Masked labels for the widening LMUL set.
#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

// All masked LMULs (widening set plus M8).
#define CASE_RVV_OPCODE_MASK(OP) \
  CASE_RVV_OPCODE_MASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

// Every widening variant, unmasked and masked.
#define CASE_RVV_OPCODE_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

// Every variant of the pseudo, unmasked and masked, all LMULs.
#define CASE_RVV_OPCODE(OP) \
  CASE_RVV_OPCODE_UNMASK(OP): \
  case CASE_RVV_OPCODE_MASK(OP)
3840
3841
3842
// Helper macros expanding vector multiply-accumulate pseudos (integer VMA and
// floating-point VFMA) into switch-case labels. The VFMA variants also encode
// the element width (SEW) suffix, so the LMUL coverage is staged by the
// minimum legal LMUL for each SEW (MF4 for E16, MF2 for E32, M1 for E64).
// Like the CASE_RVV_OPCODE_* family, each expansion omits the leading `case`
// so the macros compose.
// NOTE(review): the column-0 lines in this listing carried fused line-number
// artifacts from HTML extraction; the macros below restore the clean tokens.
#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

// Integer VMA pseudo across all seven LMULs.
#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

// Floating-point VFMA pseudo for one LMUL/SEW combination.
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

// VFMA labels for LMULs M1..M8 at a given SEW.
#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

// VFMA labels for LMULs MF2..M8 at a given SEW.
#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

// VFMA labels for LMULs MF4..M8 at a given SEW.
#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

// Vector-vector VFMA across all legal SEW/LMUL combinations (including the
// _ALT bf16 encoding at E16).
#define CASE_VFMA_OPCODE_VV(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

// Scalar-splat VFMA (vector x FPR) across all legal SEW/LMUL combinations.
#define CASE_VFMA_SPLATS(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3884
3885
3887 unsigned &SrcOpIdx1,
3888 unsigned &SrcOpIdx2) const {
3890 if (.isCommutable())
3891 return false;
3892
3893 switch (MI.getOpcode()) {
3894 case RISCV::TH_MVEQZ:
3895 case RISCV::TH_MVNEZ:
3896
3897
3898
3899 if (MI.getOperand(2).getReg() == RISCV::X0)
3900 return false;
3901
3902 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3903 case RISCV::QC_SELECTIEQ:
3904 case RISCV::QC_SELECTINE:
3905 case RISCV::QC_SELECTIIEQ:
3906 case RISCV::QC_SELECTIINE:
3907 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3908 case RISCV::QC_MVEQ:
3909 case RISCV::QC_MVNE:
3910 case RISCV::QC_MVLT:
3911 case RISCV::QC_MVGE:
3912 case RISCV::QC_MVLTU:
3913 case RISCV::QC_MVGEU:
3914 case RISCV::QC_MVEQI:
3915 case RISCV::QC_MVNEI:
3916 case RISCV::QC_MVLTI:
3917 case RISCV::QC_MVGEI:
3918 case RISCV::QC_MVLTUI:
3919 case RISCV::QC_MVGEUI:
3920 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
3921 case RISCV::TH_MULA:
3922 case RISCV::TH_MULAW:
3923 case RISCV::TH_MULAH:
3924 case RISCV::TH_MULS:
3925 case RISCV::TH_MULSW:
3926 case RISCV::TH_MULSH:
3927
3928 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3929 case RISCV::PseudoCCMOVGPRNoX0:
3930 case RISCV::PseudoCCMOVGPR:
3931
3932 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
3958
3959 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3978
3981 1) == 0)
3982 return false;
3983
3984
3985
3986 unsigned CommutableOpIdx1 = 1;
3987 unsigned CommutableOpIdx2 = 3;
3988 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3989 CommutableOpIdx2))
3990 return false;
3991 return true;
3992 }
3999
4002 1) == 0)
4003 return false;
4004
4005
4006
4007
4008
4009 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
4010 return false;
4011 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
4012 return false;
4013
4014
4015 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
4016 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
4017 return false;
4018
4019
4020
4021
4022 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
4023 SrcOpIdx2 == CommuteAnyOperandIndex) {
4024
4025
4026 unsigned CommutableOpIdx1 = SrcOpIdx1;
4027 if (SrcOpIdx1 == SrcOpIdx2) {
4028
4029
4030 CommutableOpIdx1 = 1;
4031 } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
4032
4033 CommutableOpIdx1 = SrcOpIdx2;
4034 }
4035
4036
4037
4038 unsigned CommutableOpIdx2;
4039 if (CommutableOpIdx1 != 1) {
4040
4041 CommutableOpIdx2 = 1;
4042 } else {
4043 Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
4044
4045
4046
4047
4048 if (Op1Reg != MI.getOperand(2).getReg())
4049 CommutableOpIdx2 = 2;
4050 else
4051 CommutableOpIdx2 = 3;
4052 }
4053
4054
4055
4056 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4057 CommutableOpIdx2))
4058 return false;
4059 }
4060
4061 return true;
4062 }
4063 }
4064
4066}
4067
4068
4069#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
4070 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
4071 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
4072 break;
4073
4074#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
4075 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
4076 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
4077 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
4078 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
4079 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
4080 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
4081 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
4082
4083
4084#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
4085 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
4086 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
4087 break;
4088
4089#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
4090 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
4091 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
4092 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
4093 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
4094
4095#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
4096 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
4097 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
4098
4099#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
4100 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
4101 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
4102
4103#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
4104 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
4105 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
4106 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
4107 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
4108
4109#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
4110 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
4111 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
4112 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
4113 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
4114
4115
4117 bool NewMI,
4118 unsigned OpIdx1,
4119 unsigned OpIdx2) const {
4121 if (NewMI)
4122 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
4123 return MI;
4124 };
4125
4126 switch (MI.getOpcode()) {
4127 case RISCV::TH_MVEQZ:
4128 case RISCV::TH_MVNEZ: {
4129 auto &WorkingMI = cloneIfNew(MI);
4130 WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
4131 : RISCV::TH_MVEQZ));
4133 OpIdx2);
4134 }
4135 case RISCV::QC_SELECTIEQ:
4136 case RISCV::QC_SELECTINE:
4137 case RISCV::QC_SELECTIIEQ:
4138 case RISCV::QC_SELECTIINE:
4140 case RISCV::QC_MVEQ:
4141 case RISCV::QC_MVNE:
4142 case RISCV::QC_MVLT:
4143 case RISCV::QC_MVGE:
4144 case RISCV::QC_MVLTU:
4145 case RISCV::QC_MVGEU:
4146 case RISCV::QC_MVEQI:
4147 case RISCV::QC_MVNEI:
4148 case RISCV::QC_MVLTI:
4149 case RISCV::QC_MVGEI:
4150 case RISCV::QC_MVLTUI:
4151 case RISCV::QC_MVGEUI: {
4152 auto &WorkingMI = cloneIfNew(MI);
4155 OpIdx2);
4156 }
4157 case RISCV::PseudoCCMOVGPRNoX0:
4158 case RISCV::PseudoCCMOVGPR: {
4159
4162 auto &WorkingMI = cloneIfNew(MI);
4163 WorkingMI.getOperand(3).setImm(CC);
4165 OpIdx1, OpIdx2);
4166 }
4185
4186
4187 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
4188 assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
4189 unsigned Opc;
4190 switch (MI.getOpcode()) {
4191 default:
4211 }
4212
4213 auto &WorkingMI = cloneIfNew(MI);
4214 WorkingMI.setDesc(get(Opc));
4216 OpIdx1, OpIdx2);
4217 }
4224 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
4225
4226
4227 if (OpIdx1 == 3 || OpIdx2 == 3) {
4228 unsigned Opc;
4229 switch (MI.getOpcode()) {
4230 default:
4238 }
4239
4240 auto &WorkingMI = cloneIfNew(MI);
4241 WorkingMI.setDesc(get(Opc));
4243 OpIdx1, OpIdx2);
4244 }
4245
4246 break;
4247 }
4248 }
4249
4251}
4252
4253#undef CASE_VMA_CHANGE_OPCODE_COMMON
4254#undef CASE_VMA_CHANGE_OPCODE_LMULS
4255#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4256#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4257#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4258#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4259#undef CASE_VFMA_CHANGE_OPCODE_VV
4260#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4261
4262#undef CASE_RVV_OPCODE_UNMASK_LMUL
4263#undef CASE_RVV_OPCODE_MASK_LMUL
4264#undef CASE_RVV_OPCODE_LMUL
4265#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4266#undef CASE_RVV_OPCODE_UNMASK
4267#undef CASE_RVV_OPCODE_MASK_WIDEN
4268#undef CASE_RVV_OPCODE_MASK
4269#undef CASE_RVV_OPCODE_WIDEN
4270#undef CASE_RVV_OPCODE
4271
4272#undef CASE_VMA_OPCODE_COMMON
4273#undef CASE_VMA_OPCODE_LMULS
4274#undef CASE_VFMA_OPCODE_COMMON
4275#undef CASE_VFMA_OPCODE_LMULS_M1
4276#undef CASE_VFMA_OPCODE_LMULS_MF2
4277#undef CASE_VFMA_OPCODE_LMULS_MF4
4278#undef CASE_VFMA_OPCODE_VV
4279#undef CASE_VFMA_SPLATS
4280
4282 switch (MI.getOpcode()) {
4283 default:
4284 break;
4285 case RISCV::ADD:
4286 case RISCV::OR:
4287 case RISCV::XOR:
4288
4289
4290 if (MI.getOperand(1).getReg() == RISCV::X0)
4291 commuteInstruction(MI);
4292
4293 if (MI.getOperand(2).getReg() == RISCV::X0) {
4294 MI.getOperand(2).ChangeToImmediate(0);
4295 MI.setDesc(get(RISCV::ADDI));
4296 return true;
4297 }
4298
4299 if (MI.getOpcode() == RISCV::XOR &&
4300 MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
4301 MI.getOperand(1).setReg(RISCV::X0);
4302 MI.getOperand(2).ChangeToImmediate(0);
4303 MI.setDesc(get(RISCV::ADDI));
4304 return true;
4305 }
4306 break;
4307 case RISCV::ORI:
4308 case RISCV::XORI:
4309
4310 if (MI.getOperand(1).getReg() == RISCV::X0) {
4311 MI.setDesc(get(RISCV::ADDI));
4312 return true;
4313 }
4314 break;
4315 case RISCV::SUB:
4316
4317 if (MI.getOperand(2).getReg() == RISCV::X0) {
4318 MI.getOperand(2).ChangeToImmediate(0);
4319 MI.setDesc(get(RISCV::ADDI));
4320 return true;
4321 }
4322 break;
4323 case RISCV::SUBW:
4324
4325 if (MI.getOperand(2).getReg() == RISCV::X0) {
4326 MI.getOperand(2).ChangeToImmediate(0);
4327 MI.setDesc(get(RISCV::ADDIW));
4328 return true;
4329 }
4330 break;
4331 case RISCV::ADDW:
4332
4333
4334 if (MI.getOperand(1).getReg() == RISCV::X0)
4335 commuteInstruction(MI);
4336
4337 if (MI.getOperand(2).getReg() == RISCV::X0) {
4338 MI.getOperand(2).ChangeToImmediate(0);
4339 MI.setDesc(get(RISCV::ADDIW));
4340 return true;
4341 }
4342 break;
4343 case RISCV::SH1ADD:
4344 case RISCV::SH1ADD_UW:
4345 case RISCV::SH2ADD:
4346 case RISCV::SH2ADD_UW:
4347 case RISCV::SH3ADD:
4348 case RISCV::SH3ADD_UW:
4349
4350 if (MI.getOperand(1).getReg() == RISCV::X0) {
4351 MI.removeOperand(1);
4353 MI.setDesc(get(RISCV::ADDI));
4354 return true;
4355 }
4356
4357 if (MI.getOperand(2).getReg() == RISCV::X0) {
4358 MI.removeOperand(2);
4359 unsigned Opc = MI.getOpcode();
4360 if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
4361 Opc == RISCV::SH3ADD_UW) {
4363 MI.setDesc(get(RISCV::SLLI_UW));
4364 return true;
4365 }
4367 MI.setDesc(get(RISCV::SLLI));
4368 return true;
4369 }
4370 break;
4371 case RISCV::AND:
4372 case RISCV::MUL:
4373 case RISCV::MULH:
4374 case RISCV::MULHSU:
4375 case RISCV::MULHU:
4376 case RISCV::MULW:
4377
4378
4379
4380
4381 if (MI.getOperand(1).getReg() == RISCV::X0 ||
4382 MI.getOperand(2).getReg() == RISCV::X0) {
4383 MI.getOperand(1).setReg(RISCV::X0);
4384 MI.getOperand(2).ChangeToImmediate(0);
4385 MI.setDesc(get(RISCV::ADDI));
4386 return true;
4387 }
4388 break;
4389 case RISCV::ANDI:
4390
4391 if (MI.getOperand(1).getReg() == RISCV::X0) {
4392 MI.getOperand(2).setImm(0);
4393 MI.setDesc(get(RISCV::ADDI));
4394 return true;
4395 }
4396 break;
4397 case RISCV::SLL:
4398 case RISCV::SRL:
4399 case RISCV::SRA:
4400
4401 if (MI.getOperand(1).getReg() == RISCV::X0) {
4402 MI.getOperand(2).ChangeToImmediate(0);
4403 MI.setDesc(get(RISCV::ADDI));
4404 return true;
4405 }
4406
4407 if (MI.getOperand(2).getReg() == RISCV::X0) {
4408 MI.getOperand(2).ChangeToImmediate(0);
4409 MI.setDesc(get(RISCV::ADDI));
4410 return true;
4411 }
4412 break;
4413 case RISCV::SLLW:
4414 case RISCV::SRLW:
4415 case RISCV::SRAW:
4416
4417 if (MI.getOperand(1).getReg() == RISCV::X0) {
4418 MI.getOperand(2).ChangeToImmediate(0);
4419 MI.setDesc(get(RISCV::ADDI));
4420 return true;
4421 }
4422 break;
4423 case RISCV::SLLI:
4424 case RISCV::SRLI:
4425 case RISCV::SRAI:
4426 case RISCV::SLLIW:
4427 case RISCV::SRLIW:
4428 case RISCV::SRAIW:
4429 case RISCV::SLLI_UW:
4430
4431 if (MI.getOperand(1).getReg() == RISCV::X0) {
4432 MI.getOperand(2).setImm(0);
4433 MI.setDesc(get(RISCV::ADDI));
4434 return true;
4435 }
4436 break;
4437 case RISCV::SLTU:
4438 case RISCV::ADD_UW:
4439
4440
4441 if (MI.getOperand(1).getReg() == RISCV::X0 &&
4442 MI.getOperand(2).getReg() == RISCV::X0) {
4443 MI.getOperand(2).ChangeToImmediate(0);
4444 MI.setDesc(get(RISCV::ADDI));
4445 return true;
4446 }
4447
4448 if (MI.getOpcode() == RISCV::ADD_UW &&
4449 MI.getOperand(1).getReg() == RISCV::X0) {
4450 MI.removeOperand(1);
4452 MI.setDesc(get(RISCV::ADDI));
4453 }
4454 break;
4455 case RISCV::SLTIU:
4456
4457
4458 if (MI.getOperand(1).getReg() == RISCV::X0) {
4459 MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);
4460 MI.setDesc(get(RISCV::ADDI));
4461 return true;
4462 }
4463 break;
4464 case RISCV::SEXT_H:
4465 case RISCV::SEXT_B:
4466 case RISCV::ZEXT_H_RV32:
4467 case RISCV::ZEXT_H_RV64:
4468
4469
4470 if (MI.getOperand(1).getReg() == RISCV::X0) {
4472 MI.setDesc(get(RISCV::ADDI));
4473 return true;
4474 }
4475 break;
4476 case RISCV::MIN:
4477 case RISCV::MINU:
4478 case RISCV::MAX:
4479 case RISCV::MAXU:
4480
4481 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
4482 MI.getOperand(2).ChangeToImmediate(0);
4483 MI.setDesc(get(RISCV::ADDI));
4484 return true;
4485 }
4486 break;
4487 case RISCV::BEQ:
4488 case RISCV::BNE:
4489
4490 if (MI.getOperand(0).getReg() == RISCV::X0) {
4492 MI.removeOperand(0);
4493 MI.insert(MI.operands_begin() + 1, {MO0});
4494 }
4495 break;
4496 case RISCV::BLTU:
4497
4498 if (MI.getOperand(0).getReg() == RISCV::X0) {
4500 MI.removeOperand(0);
4501 MI.insert(MI.operands_begin() + 1, {MO0});
4502 MI.setDesc(get(RISCV::BNE));
4503 }
4504 break;
4505 case RISCV::BGEU:
4506
4507 if (MI.getOperand(0).getReg() == RISCV::X0) {
4509 MI.removeOperand(0);
4510 MI.insert(MI.operands_begin() + 1, {MO0});
4511 MI.setDesc(get(RISCV::BEQ));
4512 }
4513 break;
4514 }
4515 return false;
4516}
4517
4518
4519#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4520 RISCV::PseudoV##OP##_##LMUL##_TIED
4521
4522#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4523 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4524 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4525 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4526 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4527 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4528 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4529
4530#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4531 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4532 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4533 break;
4534
4535#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4536 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4537 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4538 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4539 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4540 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4541 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4542
4543
4544#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4545 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4546
4547#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4548 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4549 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4550 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4551 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4552 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4553 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4554 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4555 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4556 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4557
4558#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4559 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4560 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4561 break;
4562
4563#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4564 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4565 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4566 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4567 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4568 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4569 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4570 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4571 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4572 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4573
4574#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4575 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4576 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4577 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4578 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4579 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4580
4581#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4582 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4583 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4584 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4585 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4586 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
4587
4588
4593 switch (MI.getOpcode()) {
4594 default:
4595 return nullptr;
4601 MI.getNumExplicitOperands() == 7 &&
4602 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4603
4605 1) == 0)
4606 return nullptr;
4607
4608 unsigned NewOpc;
4609 switch (MI.getOpcode()) {
4610 default:
4616 }
4617
4618
4629 break;
4630 }
4635
4637 MI.getNumExplicitOperands() == 6);
4639 1) == 0)
4640 return nullptr;
4641
4642
4643 unsigned NewOpc;
4644 switch (MI.getOpcode()) {
4645 default:
4651 }
4652
4653
4663 break;
4664 }
4665 }
4667
4668 if (LV) {
4669 unsigned NumOps = MI.getNumOperands();
4670 for (unsigned I = 1; I < NumOps; ++I) {
4672 if (Op.isReg() && Op.isKill())
4674 }
4675 }
4676
4677 if (LIS) {
4679
4680 if (MI.getOperand(0).isEarlyClobber()) {
4681
4682
4683
4688 }
4689 }
4690
4691 return MIB;
4692}
4693
4694#undef CASE_WIDEOP_OPCODE_COMMON
4695#undef CASE_WIDEOP_OPCODE_LMULS
4696#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4697#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4698#undef CASE_FP_WIDEOP_OPCODE_COMMON
4699#undef CASE_FP_WIDEOP_OPCODE_LMULS
4700#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4701#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
4702
4710 if (ShiftAmount == 0)
4711 return;
4714 .addImm(ShiftAmount)
4716 } else if (int ShXAmount, ShiftAmount;
4717 STI.hasShlAdd(3) &&
4718 (ShXAmount = isShifted359(Amount, ShiftAmount)) != 0) {
4719
4720 unsigned Opc;
4721 switch (ShXAmount) {
4722 case 1:
4723 Opc = RISCV::SH1ADD;
4724 break;
4725 case 2:
4726 Opc = RISCV::SH2ADD;
4727 break;
4728 case 3:
4729 Opc = RISCV::SH3ADD;
4730 break;
4731 default:
4733 }
4734 if (ShiftAmount)
4737 .addImm(ShiftAmount)
4744 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
4748 .addImm(ShiftAmount)
4755 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
4759 .addImm(ShiftAmount)
4765 } else if (STI.hasStdExtZmmul()) {
4766 Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
4772 } else {
4774 uint32_t PrevShiftAmount = 0;
4775 for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
4776 if (Amount & (1U << ShiftAmount)) {
4777 if (ShiftAmount)
4780 .addImm(ShiftAmount - PrevShiftAmount)
4782 if (Amount >> (ShiftAmount + 1)) {
4783
4784 if (!Acc) {
4785 Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
4789 } else {
4794 }
4795 }
4796 PrevShiftAmount = ShiftAmount;
4797 }
4798 }
4799 assert(Acc && "Expected valid accumulator");
4804 }
4805}
4806
4809 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
4812 return ArrayRef(TargetFlags);
4813}
4814
4817 ? STI.getTailDupAggressiveThreshold()
4818 : 2;
4819}
4820
4822
4823
4824 unsigned Opcode = MI.getOpcode();
4825 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
4827 return false;
4828 return true;
4829}
4830
4831std::optional<std::pair<unsigned, unsigned>>
4833 switch (Opcode) {
4834 default:
4835 return std::nullopt;
4836 case RISCV::PseudoVSPILL2_M1:
4837 case RISCV::PseudoVRELOAD2_M1:
4838 return std::make_pair(2u, 1u);
4839 case RISCV::PseudoVSPILL2_M2:
4840 case RISCV::PseudoVRELOAD2_M2:
4841 return std::make_pair(2u, 2u);
4842 case RISCV::PseudoVSPILL2_M4:
4843 case RISCV::PseudoVRELOAD2_M4:
4844 return std::make_pair(2u, 4u);
4845 case RISCV::PseudoVSPILL3_M1:
4846 case RISCV::PseudoVRELOAD3_M1:
4847 return std::make_pair(3u, 1u);
4848 case RISCV::PseudoVSPILL3_M2:
4849 case RISCV::PseudoVRELOAD3_M2:
4850 return std::make_pair(3u, 2u);
4851 case RISCV::PseudoVSPILL4_M1:
4852 case RISCV::PseudoVRELOAD4_M1:
4853 return std::make_pair(4u, 1u);
4854 case RISCV::PseudoVSPILL4_M2:
4855 case RISCV::PseudoVRELOAD4_M2:
4856 return std::make_pair(4u, 2u);
4857 case RISCV::PseudoVSPILL5_M1:
4858 case RISCV::PseudoVRELOAD5_M1:
4859 return std::make_pair(5u, 1u);
4860 case RISCV::PseudoVSPILL6_M1:
4861 case RISCV::PseudoVRELOAD6_M1:
4862 return std::make_pair(6u, 1u);
4863 case RISCV::PseudoVSPILL7_M1:
4864 case RISCV::PseudoVRELOAD7_M1:
4865 return std::make_pair(7u, 1u);
4866 case RISCV::PseudoVSPILL8_M1:
4867 case RISCV::PseudoVRELOAD8_M1:
4868 return std::make_pair(8u, 1u);
4869 }
4870}
4871
4873 int16_t MI1FrmOpIdx =
4874 RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
4875 int16_t MI2FrmOpIdx =
4876 RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
4877 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
4878 return false;
4882}
4883
4884std::optional
4886 switch (Opcode) {
4887 default:
4888 return std::nullopt;
4889
4890
4891 case RISCV::VSLL_VX:
4892 case RISCV::VSRL_VX:
4893 case RISCV::VSRA_VX:
4894
4895 case RISCV::VSSRL_VX:
4896 case RISCV::VSSRA_VX:
4897
4898 case RISCV::VROL_VX:
4899 case RISCV::VROR_VX:
4900
4901 return Log2SEW;
4902
4903
4904 case RISCV::VNSRL_WX:
4905 case RISCV::VNSRA_WX:
4906
4907 case RISCV::VNCLIPU_WX:
4908 case RISCV::VNCLIP_WX:
4909
4910 case RISCV::VWSLL_VX:
4911
4912 return Log2SEW + 1;
4913
4914
4915 case RISCV::VADD_VX:
4916 case RISCV::VSUB_VX:
4917 case RISCV::VRSUB_VX:
4918
4919 case RISCV::VWADDU_VX:
4920 case RISCV::VWSUBU_VX:
4921 case RISCV::VWADD_VX:
4922 case RISCV::VWSUB_VX:
4923 case RISCV::VWADDU_WX:
4924 case RISCV::VWSUBU_WX:
4925 case RISCV::VWADD_WX:
4926 case RISCV::VWSUB_WX:
4927
4928 case RISCV::VADC_VXM:
4929 case RISCV::VADC_VIM:
4930 case RISCV::VMADC_VXM:
4931 case RISCV::VMADC_VIM:
4932 case RISCV::VMADC_VX:
4933 case RISCV::VSBC_VXM:
4934 case RISCV::VMSBC_VXM:
4935 case RISCV::VMSBC_VX:
4936
4937 case RISCV::VAND_VX:
4938 case RISCV::VOR_VX:
4939 case RISCV::VXOR_VX:
4940
4941 case RISCV::VMSEQ_VX:
4942 case RISCV::VMSNE_VX:
4943 case RISCV::VMSLTU_VX:
4944 case RISCV::VMSLT_VX:
4945 case RISCV::VMSLEU_VX:
4946 case RISCV::VMSLE_VX:
4947 case RISCV::VMSGTU_VX:
4948 case RISCV::VMSGT_VX:
4949
4950 case RISCV::VMINU_VX:
4951 case RISCV::VMIN_VX:
4952 case RISCV::VMAXU_VX:
4953 case RISCV::VMAX_VX:
4954
4955 case RISCV::VMUL_VX:
4956 case RISCV::VMULH_VX:
4957 case RISCV::VMULHU_VX:
4958 case RISCV::VMULHSU_VX:
4959
4960 case RISCV::VDIVU_VX:
4961 case RISCV::VDIV_VX:
4962 case RISCV::VREMU_VX:
4963 case RISCV::VREM_VX:
4964
4965 case RISCV::VWMUL_VX:
4966 case RISCV::VWMULU_VX:
4967 case RISCV::VWMULSU_VX:
4968
4969 case RISCV::VMACC_VX:
4970 case RISCV::VNMSAC_VX:
4971 case RISCV::VMADD_VX:
4972 case RISCV::VNMSUB_VX:
4973
4974 case RISCV::VWMACCU_VX:
4975 case RISCV::VWMACC_VX:
4976 case RISCV::VWMACCSU_VX:
4977 case RISCV::VWMACCUS_VX:
4978
4979 case RISCV::VMERGE_VXM:
4980
4981 case RISCV::VMV_V_X:
4982
4983 case RISCV::VSADDU_VX:
4984 case RISCV::VSADD_VX:
4985 case RISCV::VSSUBU_VX:
4986 case RISCV::VSSUB_VX:
4987
4988 case RISCV::VAADDU_VX:
4989 case RISCV::VAADD_VX:
4990 case RISCV::VASUBU_VX:
4991 case RISCV::VASUB_VX:
4992
4993 case RISCV::VSMUL_VX:
4994
4995 case RISCV::VMV_S_X:
4996
4997 case RISCV::VANDN_VX:
4998 return 1U << Log2SEW;
4999 }
5000}
5001
5004 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
5005 if ()
5006 return 0;
5007 return RVV->BaseInstr;
5008}
5009
5011 unsigned DestEEW =
5013
5014 if (DestEEW == 0)
5015 return 0;
5016
5017 unsigned Scaled = Log2SEW + (DestEEW - 1);
5020}
5021
5028 int64_t Imm;
5030 return Imm;
5031 return std::nullopt;
5032}
5033
5034
5036 assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
5037 (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
5038 if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
5039 LHS.getReg() == RHS.getReg())
5040 return true;
5042 return true;
5043 if (LHS.isImm() && LHS.getImm() == 0)
5044 return true;
5046 return false;
5049 if (!LHSImm || !RHSImm)
5050 return false;
5051 return LHSImm <= RHSImm;
5052}
5053
5054namespace {
5059
5060public:
5063 : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}
5064
5065 bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
5066
5067
5069 return true;
5071 return true;
5072 return false;
5073 }
5074
5075 std::optional createTripCountGreaterCondition(
5076 int TC, MachineBasicBlock &MBB,
5077 SmallVectorImpl &CondParam) override {
5078
5079
5080
5081 CondParam = Cond;
5082 return {};
5083 }
5084
5085 void setPreheader(MachineBasicBlock *NewPreheader) override {}
5086
5087 void adjustTripCount(int TripCountAdjust) override {}
5088};
5089}
5090
5091std::unique_ptrTargetInstrInfo::PipelinerLoopInfo
5096 return nullptr;
5097
5098
5099 if (TBB == LoopBB && FBB == LoopBB)
5100 return nullptr;
5101
5102
5103 if (FBB == nullptr)
5104 return nullptr;
5105
5106 assert((TBB == LoopBB || FBB == LoopBB) &&
5107 "The Loop must be a single-basic-block loop");
5108
5109
5110 if (TBB == LoopBB)
5112
5115 if (.isReg())
5116 return nullptr;
5118 if (!Reg.isVirtual())
5119 return nullptr;
5120 return MRI.getVRegDef(Reg);
5121 };
5122
5125 if (LHS && LHS->isPHI())
5126 return nullptr;
5127 if (RHS && RHS->isPHI())
5128 return nullptr;
5129
5130 return std::make_unique(LHS, RHS, Cond);
5131}
5132
5133
5136 Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
5137 switch (Opc) {
5138 default:
5139 return false;
5140
5141 case RISCV::DIV:
5142 case RISCV::DIVW:
5143 case RISCV::DIVU:
5144 case RISCV::DIVUW:
5145 case RISCV::REM:
5146 case RISCV::REMW:
5147 case RISCV::REMU:
5148 case RISCV::REMUW:
5149
5150 case RISCV::FDIV_H:
5151 case RISCV::FDIV_S:
5152 case RISCV::FDIV_D:
5153 case RISCV::FDIV_H_INX:
5154 case RISCV::FDIV_S_INX:
5155 case RISCV::FDIV_D_INX:
5156 case RISCV::FDIV_D_IN32X:
5157 case RISCV::FSQRT_H:
5158 case RISCV::FSQRT_S:
5159 case RISCV::FSQRT_D:
5160 case RISCV::FSQRT_H_INX:
5161 case RISCV::FSQRT_S_INX:
5162 case RISCV::FSQRT_D_INX:
5163 case RISCV::FSQRT_D_IN32X:
5164
5165 case RISCV::VDIV_VV:
5166 case RISCV::VDIV_VX:
5167 case RISCV::VDIVU_VV:
5168 case RISCV::VDIVU_VX:
5169 case RISCV::VREM_VV:
5170 case RISCV::VREM_VX:
5171 case RISCV::VREMU_VV:
5172 case RISCV::VREMU_VX:
5173
5174 case RISCV::VFDIV_VV:
5175 case RISCV::VFDIV_VF:
5176 case RISCV::VFRDIV_VF:
5177 case RISCV::VFSQRT_V:
5178 case RISCV::VFRSQRT7_V:
5179 return true;
5180 }
5181}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
const TargetInstrInfo & TII
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool cannotInsertTailCall(const MachineBasicBlock &MBB)
Definition RISCVInstrInfo.cpp:3539
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
Definition RISCVInstrInfo.cpp:4109
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)
Definition RISCVInstrInfo.cpp:4581
#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)
Definition RISCVInstrInfo.cpp:4547
#define CASE_OPERAND_SIMM(NUM)
static std::optional< unsigned > getLMULForRVVWholeLoadStore(unsigned Opcode)
Definition RISCVInstrInfo.cpp:107
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
Definition RISCVInstrInfo.cpp:4103
static bool analyzeCandidate(outliner::Candidate &C)
Definition RISCVInstrInfo.cpp:3560
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
Definition RISCVInstrInfo.cpp:2683
std::optional< unsigned > getFoldedOpcode(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, const RISCVSubtarget &ST)
Definition RISCVInstrInfo.cpp:814
#define RVV_OPC_LMUL_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)
Definition RISCVInstrInfo.cpp:4563
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
Definition RISCVInstrInfo.cpp:2718
static unsigned getAddendOperandIdx(unsigned Pattern)
Definition RISCVInstrInfo.cpp:2705
#define CASE_RVV_OPCODE_UNMASK(OP)
Definition RISCVInstrInfo.cpp:3817
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
Definition RISCVInstrInfo.cpp:4535
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
Definition RISCVInstrInfo.cpp:3879
unsigned getPredicatedOpcode(unsigned Opcode)
Definition RISCVInstrInfo.cpp:1709
#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)
Definition RISCVInstrInfo.cpp:4574
#define CASE_WIDEOP_OPCODE_LMULS(OP)
Definition RISCVInstrInfo.cpp:4522
static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
Definition RISCVInstrInfo.cpp:3527
#define OPCODE_LMUL_MASK_CASE(OPC)
static bool isFSUB(unsigned Opc)
Definition RISCVInstrInfo.cpp:2120
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
Definition RISCVInstrInfo.cpp:4074
#define CASE_RVV_OPCODE(OP)
Definition RISCVInstrInfo.cpp:3837
static std::optional< int64_t > getEffectiveImm(const MachineOperand &MO)
Definition RISCVInstrInfo.cpp:5022
#define CASE_VFMA_OPCODE_VV(OP)
Definition RISCVInstrInfo.cpp:3873
MachineOutlinerConstructionID
Definition RISCVInstrInfo.cpp:3510
#define CASE_RVV_OPCODE_WIDEN(OP)
Definition RISCVInstrInfo.cpp:3833
static unsigned getSHXADDUWShiftAmount(unsigned Opc)
Definition RISCVInstrInfo.cpp:2617
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
Definition RISCVInstrInfo.cpp:3846
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVVType::VLMUL LMul)
Definition RISCVInstrInfo.cpp:257
static bool isFMUL(unsigned Opc)
Definition RISCVInstrInfo.cpp:2131
static unsigned getInverseXqcicmOpcode(unsigned Opcode)
Definition RISCVInstrInfo.cpp:1038
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
Definition RISCVInstrInfo.cpp:2557
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
Definition RISCVInstrInfo.cpp:2586
static bool isCandidatePatchable(const MachineBasicBlock &MBB)
Definition RISCVInstrInfo.cpp:3520
static bool isFADD(unsigned Opc)
Definition RISCVInstrInfo.cpp:2109
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
Definition RISCVInstrInfo.cpp:2764
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm)
Definition RISCVInstrInfo.cpp:1505
static bool isMIModifiesReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
Definition RISCVInstrInfo.cpp:3533
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
Definition RISCVInstrInfo.cpp:2507
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
Definition RISCVInstrInfo.cpp:2632
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
Definition RISCVInstrInfo.cpp:2534
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
Definition RISCVInstrInfo.cpp:2602
#define CASE_RVV_OPCODE_MASK(OP)
Definition RISCVInstrInfo.cpp:3829
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII, const RISCVSubtarget &STI)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
Definition RISCVInstrInfo.cpp:1762
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
static cl::opt< unsigned > CacheLineSize("cache-line-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target cache line size when " "specified by the user."))
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
bool empty() const
empty - Check if the array is empty.
static LLVM_ABI DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
Attempts to merge LocA and LocB into a single location; see DebugLoc::getMergedLocation for more deta...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
const FeatureBitset & getFeatureBits() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBundleIterator< const MachineInstr, true > const_reverse_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Definition RISCVInstrInfo.cpp:647
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Definition RISCVInstrInfo.cpp:4589
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition RISCVInstrInfo.cpp:101
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
Definition RISCVInstrInfo.cpp:3582
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Definition RISCVInstrInfo.cpp:1271
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
Definition RISCVInstrInfo.cpp:2822
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
Definition RISCVInstrInfo.cpp:900
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
Definition RISCVInstrInfo.cpp:3237
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
Definition RISCVInstrInfo.cpp:4703
static bool isPairableLdStInstOpc(unsigned Opc)
Return true if pairing the given load or store may be paired with another.
Definition RISCVInstrInfo.cpp:3258
RISCVInstrInfo(const RISCVSubtarget &STI)
Definition RISCVInstrInfo.cpp:84
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
Definition RISCVInstrInfo.cpp:3486
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Definition RISCVInstrInfo.cpp:5092
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
Definition RISCVInstrInfo.cpp:1305
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
Definition RISCVInstrInfo.cpp:2360
static bool isLdStSafeToPair(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
Definition RISCVInstrInfo.cpp:3270
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
Definition RISCVInstrInfo.cpp:383
bool isReMaterializableImpl(const MachineInstr &MI) const override
Definition RISCVInstrInfo.cpp:237
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
Definition RISCVInstrInfo.cpp:1835
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
Definition RISCVInstrInfo.cpp:3181
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
Definition RISCVInstrInfo.cpp:1343
bool isAsCheapAsAMove(const MachineInstr &MI) const override
Definition RISCVInstrInfo.cpp:1986
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
Definition RISCVInstrInfo.cpp:2854
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
Definition RISCVInstrInfo.cpp:3402
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
Definition RISCVInstrInfo.cpp:4815
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
Definition RISCVInstrInfo.cpp:2349
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition RISCVInstrInfo.cpp:186
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
Definition RISCVInstrInfo.cpp:2439
bool simplifyInstruction(MachineInstr &MI) const override
Definition RISCVInstrInfo.cpp:4281
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Definition RISCVInstrInfo.cpp:3466
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
Definition RISCVInstrInfo.cpp:3653
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
Definition RISCVInstrInfo.cpp:2065
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
Definition RISCVInstrInfo.cpp:1900
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
Definition RISCVInstrInfo.cpp:3724
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Definition RISCVInstrInfo.cpp:1414
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
Definition RISCVInstrInfo.cpp:4808
MCInst getNop() const override
Definition RISCVInstrInfo.cpp:92
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
Definition RISCVInstrInfo.cpp:885
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Definition RISCVInstrInfo.cpp:731
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
Definition RISCVInstrInfo.cpp:3503
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
Definition RISCVInstrInfo.cpp:3291
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Definition RISCVInstrInfo.cpp:3689
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
Definition RISCVInstrInfo.cpp:2079
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Definition RISCVInstrInfo.cpp:3460
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Definition RISCVInstrInfo.cpp:4116
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
Definition RISCVInstrInfo.cpp:2326
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
Definition RISCVInstrInfo.cpp:1654
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
Definition RISCVInstrInfo.cpp:3742
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Definition RISCVInstrInfo.cpp:3515
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
Definition RISCVInstrInfo.cpp:506
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Definition RISCVInstrInfo.cpp:3886
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Definition RISCVInstrInfo.cpp:1199
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
Definition RISCVInstrInfo.cpp:3705
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
Definition RISCVInstrInfo.cpp:1661
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
Definition RISCVInstrInfo.cpp:965
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
Definition RISCVInstrInfo.cpp:2383
CombinerObjective getCombinerObjective(unsigned Pattern) const override
Definition RISCVInstrInfo.cpp:2657
bool isHighLatencyDef(int Opc) const override
Definition RISCVInstrInfo.cpp:5134
static bool evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0, int64_t C1)
Return the result of the evaluation of C0 CC C1, where CC is a RISCVCC::CondCode.
Definition RISCVInstrInfo.cpp:1004
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
Definition RISCVInstrInfo.cpp:2669
bool optimizeCondBranch(MachineInstr &MI) const override
Definition RISCVInstrInfo.cpp:1528
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
Definition RISCVInstrInfo.cpp:2012
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
Definition RISCVInstrInfo.cpp:1811
static bool isFromLoadImm(const MachineRegisterInfo &MRI, const MachineOperand &Op, int64_t &Imm)
Return true if the operand is a load immediate instruction and sets Imm to the immediate value.
Definition RISCVInstrInfo.cpp:1514
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
Definition RISCVInstrInfo.cpp:3368
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Definition RISCVInstrInfo.cpp:3426
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
const RISCVRegisterInfo * getRegisterInfo() const override
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getInverseBranchCondition(CondCode)
Definition RISCVInstrInfo.cpp:1180
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
Definition RISCVInstrInfo.cpp:1069
static bool isValidRoundingMode(unsigned Mode)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static MCRegister getTailExpandUseRegNo(const FeatureBitset &FeatureBits)
static int getFRMOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool usesVXRM(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
static unsigned getNF(uint8_t TSFlags)
static RISCVVType::VLMUL getLMul(uint8_t TSFlags)
Definition RISCVInstrInfo.cpp:68
static bool isTailAgnostic(unsigned VType)
LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
static bool isValidSEW(unsigned SEW)
LLVM_ABI void printVType(unsigned VType, raw_ostream &OS)
static bool isValidXSfmmVType(unsigned VTypeI)
static unsigned getSEW(unsigned VType)
static VLMUL getVLMUL(unsigned VType)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
Definition RISCVInstrInfo.cpp:4872
bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)
Given two VL operands, do we know that LHS <= RHS?
Definition RISCVInstrInfo.cpp:5035
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
Definition RISCVInstrInfo.cpp:5002
unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)
Definition RISCVInstrInfo.cpp:5010
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
Definition RISCVInstrInfo.cpp:4885
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
Definition RISCVInstrInfo.cpp:4832
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
Definition RISCVInstrInfo.cpp:4821
static constexpr unsigned RVVBytesPerBlock
static constexpr int64_t VLMaxSentinel
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
static const MachineMemOperand::Flags MONontemporalBit0
unsigned getDeadRegState(bool B)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
CodeGenOptLevel
Code generation optimization level.
int isShifted359(T Value, int &Shift)
bool isa(const From &Val)
isa - Return true if the parameter to the template is an instance of one of the template type argu...
unsigned getKillRegState(bool B)
unsigned getRenamableRegState(bool B)
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
constexpr bool isShiftedUInt(uint64_t x)
Checks if a unsigned integer is an N bit number shifted left by S.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool isRVVRegClass(const TargetRegisterClass *RC)
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const
The information necessary to create an outlined function for some class of candidate.