//===-- RISCVISelDAGToDAG.cpp - A DAG to DAG inst selector for RISC-V ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISC-V target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVBaseInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"
#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"

34 "riscv-use-rematerializable-movimm", cl::Hidden,
35 cl::desc("Use a rematerializable pseudoinstruction for 2 instruction "
36 "constant materialization"),
38
#define GET_DAGISEL_BODY RISCVDAGToDAGISel
#include "RISCVGenDAGISel.inc"

void RISCVDAGToDAGISel::PreprocessISelDAG() {
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    SDValue Result;
    switch (N->getOpcode()) {
    case ISD::SPLAT_VECTOR: {
      // Skip this conversion when P-extension code generation is enabled.
      if (Subtarget->enablePExtCodeGen())
        break;

      // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
      // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
      MVT VT = N->getSimpleValueType(0);
      unsigned Opc =
          VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
      SDLoc DL(N);
      SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
      SDValue Src = N->getOperand(0);
      if (VT.isInteger())
        Src = CurDAG->getNode(ISD::ANY_EXTEND, DL, Subtarget->getXLenVT(),
                              N->getOperand(0));
      Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT), Src, VL);
      break;
    }
70 case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
71
72
73
74 assert(N->getNumOperands() == 4 && "Unexpected number of operands");
75 MVT VT = N->getSimpleValueType(0);
76 SDValue Passthru = N->getOperand(0);
79 SDValue VL = N->getOperand(3);
81 Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
82 "Unexpected VTs!");
85
86
91
94
99
101
104 CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
106 IntID,
107 Passthru,
108 StackSlot,
109 CurDAG->getRegister(RISCV::X0, MVT::i64),
110 VL};
111
113 MVT::i64, MPI, Align(8),
115 break;
116 }
117 case ISD::FP_EXTEND: {
118
120 MVT VT = N->getSimpleValueType(0);
122 break;
123 SDValue VLMAX = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
126 Result = CurDAG->getNode(RISCVISD::FP_EXTEND_VL, DL, VT, N->getOperand(0),
127 TrueMask, VLMAX);
128 break;
129 }
130 }
131
132 if (Result) {
133 LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");
138
139 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
140 MadeChange = true;
141 }
142 }
143
144 if (MadeChange)
145 CurDAG->RemoveDeadNodes();
146}
147
void RISCVDAGToDAGISel::PostprocessISelDAG() {
  HandleSDNode Dummy(CurDAG->getRoot());
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;
158
159 MadeChange |= doPeepholeSExtW(N);
160
161
162
163
165 }
166
168
169
170
171
172
173
174
175 MadeChange |= doPeepholeNoRegPassThru();
176
177 if (MadeChange)
178 CurDAG->RemoveDeadNodes();
179}
180
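// Materialize an immediate from a RISCVMatInt instruction sequence, chaining
// each step's result register into the next instruction. For example, on RV64
// the constant 0x12345678 can be built with a two-instruction sequence:
//   lui   a0, 0x12345     ; a0 = 0x12345000
//   addiw a0, a0, 0x678   ; a0 = 0x12345678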
static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                            RISCVMatInt::InstSeq &Seq) {
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
  for (const RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getSignedTargetConstant(Inst.getImm(), DL, VT);
    SDNode *Result = nullptr;
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SDImm);
      break;
    case RISCVMatInt::RegX0:
      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg,
                                      CurDAG->getRegister(RISCV::X0, VT));
      break;
    case RISCVMatInt::RegReg:
      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SrcReg);
      break;
    case RISCVMatInt::RegImm:
      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SDImm);
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return SrcReg;
}
209
213
214
219 0);
220
221
222
223
224
225
226 if (Seq.size() > 3) {
227 unsigned ShiftAmt, AddOpc;
230 if (!SeqLo.empty() && (SeqLo.size() + 2) < Seq.size()) {
232
236 0);
238 }
239 }
240
241
243}
244
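// Shared helper that appends the trailing operands common to vector load and
// store pseudos: the base pointer, an optional stride or index, an optional
// mask, the VL operand, the SEW immediate, a tail/mask policy for loads, and
// finally the chain. Callers are expected to have pushed any passthru operand
// before calling this.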
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
250
251 Operands.push_back(Node->getOperand(CurOp++));
252
253 if (IsStridedOrIndexed) {
254 Operands.push_back(Node->getOperand(CurOp++));
255 if (IndexVT)
256 *IndexVT = Operands.back()->getSimpleValueType(0);
257 }
258
259 if (IsMasked) {
260 SDValue Mask = Node->getOperand(CurOp++);
262 }
266
267 MVT XLenVT = Subtarget->getXLenVT();
268 SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
270
271
272
273
274 if (IsLoad) {
276 if (IsMasked)
277 Policy = Node->getConstantOperandVal(CurOp++);
278 SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
280 }
281
282 Operands.push_back(Chain);
283}
284
286 bool IsStrided) {
288 MVT VT = Node->getSimpleValueType(0);
289 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
291
292 unsigned CurOp = 2;
294
296
298 Operands, true);
299
301 RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, false, Log2SEW,
302 static_cast<unsigned>(LMUL));
304 CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
305
307
311}
312
314 bool IsMasked) {
316 MVT VT = Node->getSimpleValueType(0);
317 MVT XLenVT = Subtarget->getXLenVT();
318 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
320
321 unsigned CurOp = 2;
323
325
327 false, Operands,
328 true);
329
331 RISCV::getVLSEGPseudo(NF, IsMasked, false, true,
332 Log2SEW, static_cast<unsigned>(LMUL));
334 XLenVT, MVT::Other, Operands);
335
337
342}
343
345 bool IsOrdered) {
347 MVT VT = Node->getSimpleValueType(0);
348 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
350
351 unsigned CurOp = 2;
353
355
356 MVT IndexVT;
358 true, Operands,
359 true, &IndexVT);
360
361#ifndef NDEBUG
362
365 if (DecodedLMUL.second)
366 ContainedTyNumElts /= DecodedLMUL.first;
367 else
368 ContainedTyNumElts *= DecodedLMUL.first;
370 "Element count mismatch");
371#endif
372
375 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
377 "values when XLEN=32");
378 }
380 NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
381 static_cast<unsigned>(IndexLMUL));
383 CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
384
386
390}
391
393 bool IsStrided) {
395 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
396 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
398
399 unsigned CurOp = 2;
401
403
405 Operands);
406
408 NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
410 CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
411
413
415}
416
418 bool IsOrdered) {
420 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
421 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
423
424 unsigned CurOp = 2;
426
428
429 MVT IndexVT;
431 true, Operands,
432 false, &IndexVT);
433
434#ifndef NDEBUG
435
438 if (DecodedLMUL.second)
439 ContainedTyNumElts /= DecodedLMUL.first;
440 else
441 ContainedTyNumElts *= DecodedLMUL.first;
443 "Element count mismatch");
444#endif
445
448 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
450 "values when XLEN=32");
451 }
453 NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
454 static_cast<unsigned>(IndexLMUL));
456 CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
457
459
461}
462
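// Select the riscv.vsetvli / riscv.vsetvlimax intrinsics. The vtype immediate
// packs SEW, LMUL and the tail/mask agnostic bits, e.g. the assembly form
//   vsetvli a0, a1, e32, m2, ta, ma
// requests 32-bit elements at LMUL=2. When the requested AVL is VLMAX (or the
// exact VLEN is known and the AVL already equals it), x0 is used as the AVL
// operand; a suitable constant AVL is emitted through the immediate form
// instead.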
464 if (!Subtarget->hasVInstructions())
465 return;
466
468
470 MVT XLenVT = Subtarget->getXLenVT();
471
472 unsigned IntNo = Node->getConstantOperandVal(0);
473
474 assert((IntNo == Intrinsic::riscv_vsetvli ||
475 IntNo == Intrinsic::riscv_vsetvlimax) &&
476 "Unexpected vsetvli intrinsic");
477
478 bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
479 unsigned Offset = (VLMax ? 1 : 2);
480
482 "Unexpected number of operands");
483
484 unsigned SEW =
487 Node->getConstantOperandVal(Offset + 1) & 0x7);
488
490 true);
491 SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
492
494 unsigned Opcode = RISCV::PseudoVSETVLI;
496 if (auto VLEN = Subtarget->getRealVLen())
498 VLMax = true;
499 }
501 VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
502 Opcode = RISCV::PseudoVSETVLIX0;
503 } else {
504 VLOperand = Node->getOperand(1);
505
507 uint64_t AVL = C->getZExtValue();
509 SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
511 XLenVT, VLImm, VTypeIOp));
512 return;
513 }
514 }
515 }
516
518 CurDAG->getMachineNode(Opcode, DL, XLenVT, VLOperand, VTypeIOp));
519}
520
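// The SiFive XSfmm matrix extension carries its own configuration-setting
// intrinsics (sf.vsettnt, sf.vsettm, sf.vsettk); map each one onto the
// corresponding PseudoSF_VSETT* instruction.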
522 if (!Subtarget->hasVendorXSfmmbase())
523 return;
524
526
528 MVT XLenVT = Subtarget->getXLenVT();
529
530 unsigned IntNo = Node->getConstantOperandVal(0);
531
532 assert((IntNo == Intrinsic::riscv_sf_vsettnt ||
533 IntNo == Intrinsic::riscv_sf_vsettm ||
534 IntNo == Intrinsic::riscv_sf_vsettk) &&
535 "Unexpected XSfmm vset intrinsic");
536
539 unsigned PseudoOpCode =
540 IntNo == Intrinsic::riscv_sf_vsettnt ? RISCV::PseudoSF_VSETTNT
541 : IntNo == Intrinsic::riscv_sf_vsettm ? RISCV::PseudoSF_VSETTM
542 : RISCV::PseudoSF_VSETTK;
543
544 if (IntNo == Intrinsic::riscv_sf_vsettnt) {
546 SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
547
549 Node->getOperand(1), VTypeIOp));
550 } else {
554 CurDAG->getMachineNode(PseudoOpCode, DL, XLenVT,
555 Node->getOperand(1), Log2SEW, TWiden));
556 }
557}
558
560 MVT VT = Node->getSimpleValueType(0);
561 unsigned Opcode = Node->getOpcode();
563 "Unexpected opcode");
565
566
567
570
572 if (!Cst)
573 return false;
574
576
577
579 return false;
580
582
583
584
585
586 bool SignExt = false;
589 SignExt = true;
591 }
592
594 return false;
595
597 if (!ShlCst)
598 return false;
599
601
602
603
605 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
606 return false;
607
608 int64_t ShiftedVal = Val >> ShAmt;
610 return false;
611
612
613 if (SignExt && ShAmt >= 32)
614 return false;
615
616
617 unsigned BinOpc;
618 switch (Opcode) {
620 case ISD::AND: BinOpc = RISCV::ANDI; break;
621 case ISD::OR: BinOpc = RISCV::ORI; break;
622 case ISD::XOR: BinOpc = RISCV::XORI; break;
623 }
624
625 unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
626
629 CurDAG->getSignedTargetConstant(ShiftedVal, DL, VT));
632 CurDAG->getTargetConstant(ShAmt, DL, VT));
634 return true;
635}
636
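// Try to form a signed bitfield extract from (sra (shl X, C1), C2) with
// C1 <= C2, or from an sra of a sign-extended value, using TH.EXT, NDS.BFOS or
// QC.EXT when one of those vendor extensions is available. For instance, with
// Msb=15 and Lsb=4 a single "th.ext rd, rs1, 15, 4" replaces the shift pair.
// Note that QC.EXT encodes a field width instead of an Msb, hence the
// adjustment inside the lambda below.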
638 unsigned Opc;
639
640 if (Subtarget->hasVendorXTHeadBb())
641 Opc = RISCV::TH_EXT;
642 else if (Subtarget->hasVendorXAndesPerf())
643 Opc = RISCV::NDS_BFOS;
644 else if (Subtarget->hasVendorXqcibm())
645 Opc = RISCV::QC_EXT;
646 else
647
648 return false;
649
651 if (!N1C)
652 return false;
653
656 return false;
657
658 auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb,
660 if (Opc == RISCV::QC_EXT) {
661
662
663
664 Msb = Msb - Lsb + 1;
665 }
667 CurDAG->getTargetConstant(Msb, DL, VT),
668 CurDAG->getTargetConstant(Lsb, DL, VT));
669 };
670
672 MVT VT = Node->getSimpleValueType(0);
673 const unsigned RightShAmt = N1C->getZExtValue();
674
675
676
679 if (!N01C)
680 return false;
681
682 const unsigned LeftShAmt = N01C->getZExtValue();
683
684
685 if (LeftShAmt > RightShAmt)
686 return false;
687
688 const unsigned MsbPlusOne = VT.getSizeInBits() - LeftShAmt;
689 const unsigned Msb = MsbPlusOne - 1;
690 const unsigned Lsb = RightShAmt - LeftShAmt;
691
692 SDNode *Sbe = BitfieldExtract(N0, Msb, Lsb, DL, VT);
694 return true;
695 }
696
697
698
700 unsigned ExtSize =
702
703
704 if (ExtSize == 32)
705 return false;
706
707 const unsigned Msb = ExtSize - 1;
708
709
710 const unsigned Lsb = RightShAmt > Msb ? Msb : RightShAmt;
711
712 SDNode *Sbe = BitfieldExtract(N0, Msb, Lsb, DL, VT);
714 return true;
715 }
716
717 return false;
718}
719
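// Try to form an Andes NDS.BFOS bitfield insert from a left/right shift pair
// whose left shift amount is larger than the right shift amount.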
721
722 if (!Subtarget->hasVendorXAndesPerf())
723 return false;
724
726 if (!N1C)
727 return false;
728
731 return false;
732
733 auto BitfieldInsert = [&](SDValue N0, unsigned Msb, unsigned Lsb,
735 unsigned Opc = RISCV::NDS_BFOS;
736
737 if (Lsb == Msb)
738 Lsb = 0;
740 CurDAG->getTargetConstant(Lsb, DL, VT),
741 CurDAG->getTargetConstant(Msb, DL, VT));
742 };
743
745 MVT VT = Node->getSimpleValueType(0);
746 const unsigned RightShAmt = N1C->getZExtValue();
747
748
749
752 if (!N01C)
753 return false;
754
755 const unsigned LeftShAmt = N01C->getZExtValue();
756
757
758 if (LeftShAmt <= RightShAmt)
759 return false;
760
761 const unsigned MsbPlusOne = VT.getSizeInBits() - RightShAmt;
762 const unsigned Msb = MsbPlusOne - 1;
763 const unsigned Lsb = LeftShAmt - RightShAmt;
764
765 SDNode *Sbi = BitfieldInsert(N0, Msb, Lsb, DL, VT);
767 return true;
768 }
769
770 return false;
771}
772
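// Emit an unsigned bitfield extract of bits [Msb:Lsb] using TH.EXTU, NDS.BFOZ
// or QC.EXTU. QC.EXTU encodes a field width rather than an Msb, which is why
// Msb is rewritten to Msb - Lsb + 1 for that opcode.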
776 unsigned Lsb) {
777 unsigned Opc;
778
779 if (Subtarget->hasVendorXTHeadBb()) {
780 Opc = RISCV::TH_EXTU;
781 } else if (Subtarget->hasVendorXAndesPerf()) {
782 Opc = RISCV::NDS_BFOZ;
783 } else if (Subtarget->hasVendorXqcibm()) {
784 Opc = RISCV::QC_EXTU;
785
786
787
788 Msb = Msb - Lsb + 1;
789 } else {
790
791 return false;
792 }
793
795 CurDAG->getTargetConstant(Msb, DL, VT),
796 CurDAG->getTargetConstant(Lsb, DL, VT));
798 return true;
799}
800
804 unsigned Lsb) {
805
806 if (!Subtarget->hasVendorXAndesPerf())
807 return false;
808
809 unsigned Opc = RISCV::NDS_BFOZ;
810
811
812 if (Lsb == Msb)
813 Lsb = 0;
815 CurDAG->getTargetConstant(Lsb, DL, VT),
816 CurDAG->getTargetConstant(Msb, DL, VT));
818 return true;
819}
820
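// Select pre/post-increment loads for the T-Head XTHeadMemIdx extension, e.g.
// a pre-increment i32 load becomes TH_LWIB. The offset must be expressible as
// a 5-bit immediate scaled by a shift in the range 0-3, which the loop below
// searches for.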
822
823 if (!Subtarget->hasVendorXTHeadMemIdx())
824 return false;
825
829 return false;
830
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Ld->getOffset());
  if (!C)
    return false;
834
837 "Unexpected addressing mode");
840 int64_t Offset = C->getSExtValue();
841
842
843
844 unsigned Shift;
845 for (Shift = 0; Shift < 4; Shift++)
847 break;
848
849
850 if (Shift == 4)
851 return false;
852
854 unsigned Opcode;
855 if (LoadVT == MVT::i8 && IsPre)
856 Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
857 else if (LoadVT == MVT::i8 && IsPost)
858 Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
859 else if (LoadVT == MVT::i16 && IsPre)
860 Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
861 else if (LoadVT == MVT::i16 && IsPost)
862 Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
863 else if (LoadVT == MVT::i32 && IsPre)
864 Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
865 else if (LoadVT == MVT::i32 && IsPost)
866 Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
867 else if (LoadVT == MVT::i64 && IsPre)
868 Opcode = RISCV::TH_LDIB;
869 else if (LoadVT == MVT::i64 && IsPost)
870 Opcode = RISCV::TH_LDIA;
871 else
872 return false;
873
881
884
886
887 return true;
888}
889
891 assert(TileNum <= 15 && "Invalid tile number");
892 return RISCV::T0 + TileNum;
893}
894
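// Select the SiFive VCIX sf.vc.x.se / sf.vc.i.se intrinsics. The LMUL operand
// uses the standard vtype vlmul encoding (0..3 = m1, m2, m4, m8 and
// 5..7 = mf8, mf4, mf2), which is why the switch below maps 5 to the MF8
// pseudo and 0 to the M1 pseudo.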
896 if (!Subtarget->hasVInstructions())
897 return;
898
900
902 unsigned IntNo = Node->getConstantOperandVal(1);
903
904 assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
905 IntNo == Intrinsic::riscv_sf_vc_i_se) &&
906 "Unexpected vsetvli intrinsic");
907
908
909 unsigned Log2SEW = Log2_32(Node->getConstantOperandVal(6));
911 CurDAG->getTargetConstant(Log2SEW, DL, Subtarget->getXLenVT());
913 Node->getOperand(4), Node->getOperand(5),
914 Node->getOperand(8), SEWOp,
915 Node->getOperand(0)};
916
917 unsigned Opcode;
919 switch (LMulSDNode->getSExtValue()) {
920 case 5:
921 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_MF8
922 : RISCV::PseudoSF_VC_I_SE_MF8;
923 break;
924 case 6:
925 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_MF4
926 : RISCV::PseudoSF_VC_I_SE_MF4;
927 break;
928 case 7:
929 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_MF2
930 : RISCV::PseudoSF_VC_I_SE_MF2;
931 break;
932 case 0:
933 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M1
934 : RISCV::PseudoSF_VC_I_SE_M1;
935 break;
936 case 1:
937 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M2
938 : RISCV::PseudoSF_VC_I_SE_M2;
939 break;
940 case 2:
941 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M4
942 : RISCV::PseudoSF_VC_I_SE_M4;
943 break;
944 case 3:
945 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M8
946 : RISCV::PseudoSF_VC_I_SE_M8;
947 break;
948 }
949
951 Opcode, DL, Node->getSimpleValueType(0), Operands));
952}
953
955#define INST_NF_CASE(NAME, NF) \
956 case Intrinsic::riscv_##NAME##NF: \
957 return NF;
958#define INST_NF_CASE_MASK(NAME, NF) \
959 case Intrinsic::riscv_##NAME##NF##_mask: \
960 return NF;
961#define INST_NF_CASE_FF(NAME, NF) \
962 case Intrinsic::riscv_##NAME##NF##ff: \
963 return NF;
964#define INST_NF_CASE_FF_MASK(NAME, NF) \
965 case Intrinsic::riscv_##NAME##NF##ff_mask: \
966 return NF;
967#define INST_ALL_NF_CASE_BASE(MACRO_NAME, NAME) \
968 MACRO_NAME(NAME, 2) \
969 MACRO_NAME(NAME, 3) \
970 MACRO_NAME(NAME, 4) \
971 MACRO_NAME(NAME, 5) \
972 MACRO_NAME(NAME, 6) \
973 MACRO_NAME(NAME, 7) \
974 MACRO_NAME(NAME, 8)
975#define INST_ALL_NF_CASE(NAME) \
976 INST_ALL_NF_CASE_BASE(INST_NF_CASE, NAME) \
977 INST_ALL_NF_CASE_BASE(INST_NF_CASE_MASK, NAME)
978#define INST_ALL_NF_CASE_WITH_FF(NAME) \
979 INST_ALL_NF_CASE(NAME) \
980 INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF, NAME) \
981 INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF_MASK, NAME)
983 default:
993 }
994}
995
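// Return true if a 32-bit value is made of two identical 16-bit halves that
// are cheap to splat: either the half fits in a signed 10-bit immediate or the
// half itself repeats a single byte.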
997
998 int16_t Bit31To16 = Val >> 16;
999 int16_t Bit15To0 = Val;
1000 int8_t Bit15To8 = Bit15To0 >> 8;
1001 int8_t Bit7To0 = Val;
1002 if (Bit31To16 != Bit15To0)
1003 return false;
1004
1005 return isInt<10>(Bit31To16) || Bit15To8 == Bit7To0;
1006}
1007
1009
1010 if (Node->isMachineOpcode()) {
1012 Node->setNodeId(-1);
1013 return;
1014 }
1015
1016
1017
1018 unsigned Opcode = Node->getOpcode();
1019 MVT XLenVT = Subtarget->getXLenVT();
1021 MVT VT = Node->getSimpleValueType(0);
1022
1023 bool HasBitTest = Subtarget->hasBEXTILike();
1024
1025 switch (Opcode) {
1027 assert((VT == Subtarget->getXLenVT() || VT == MVT::i32) && "Unexpected VT");
1029 if (ConstNode->isZero()) {
1031 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, VT);
1033 return;
1034 }
1035 int64_t Imm = ConstNode->getSExtValue();
1036
1037
1038
1041
1042
1046
1047
1050
1053
1054
1055
1056 Imm = ((uint64_t)Imm << 32) | (Imm & 0xFFFFFFFF);
1057 }
1058
1060 return;
1061 }
1064
1065 bool Is64Bit = Subtarget->is64Bit();
1066 bool HasZdinx = Subtarget->hasStdExtZdinx();
1067
1068 bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
1070
1071
1072 if (APF.isPosZero() || NegZeroF64) {
1073 if (VT == MVT::f64 && HasZdinx && !Is64Bit)
1074 Imm = CurDAG->getRegister(RISCV::X0_Pair, MVT::f64);
1075 else
1076 Imm = CurDAG->getRegister(RISCV::X0, XLenVT);
1077 } else {
1079 *Subtarget);
1080 }
1081
1082 unsigned Opc;
1084 default:
1086 case MVT::bf16:
1087 assert(Subtarget->hasStdExtZfbfmin());
1088 Opc = RISCV::FMV_H_X;
1089 break;
1090 case MVT::f16:
1091 Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;
1092 break;
1093 case MVT::f32:
1094 Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
1095 break;
1096 case MVT::f64:
1097
1098
1099 assert((Subtarget->is64Bit() || APF.isZero()) && "Unexpected constant");
1100 if (HasZdinx)
1101 Opc = RISCV::COPY;
1102 else
1103 Opc = Is64Bit ? RISCV::FMV_D_X : RISCV::FCVT_D_W;
1104 break;
1105 }
1106
1108 if (VT.SimpleTy == MVT::f16 && Opc == RISCV::COPY) {
1109 Res =
1110 CurDAG->getTargetExtractSubreg(RISCV::sub_16, DL, VT, Imm).getNode();
1111 } else if (VT.SimpleTy == MVT::f32 && Opc == RISCV::COPY) {
1112 Res =
1113 CurDAG->getTargetExtractSubreg(RISCV::sub_32, DL, VT, Imm).getNode();
1114 } else if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
1115 Res = CurDAG->getMachineNode(
1118 else
1119 Res = CurDAG->getMachineNode(Opc, DL, VT, Imm);
1120
1121
1122 if (NegZeroF64) {
1123 Opc = RISCV::FSGNJN_D;
1124 if (HasZdinx)
1125 Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
1126 Res =
1128 }
1129
1131 return;
1132 }
1133 case RISCVISD::BuildGPRPair:
1134 case RISCVISD::BuildPairF64: {
1135 if (Opcode == RISCVISD::BuildPairF64 && !Subtarget->hasStdExtZdinx())
1136 break;
1137
1138 assert((!Subtarget->is64Bit() || Opcode == RISCVISD::BuildGPRPair) &&
1139 "BuildPairF64 only handled here on rv32i_zdinx");
1140
1142 CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32),
1143 Node->getOperand(0),
1144 CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32),
1145 Node->getOperand(1),
1146 CurDAG->getTargetConstant(RISCV::sub_gpr_odd, DL, MVT::i32)};
1147
1148 SDNode *N = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
1150 return;
1151 }
1152 case RISCVISD::SplitGPRPair:
1153 case RISCVISD::SplitF64: {
1154 if (Subtarget->hasStdExtZdinx() || Opcode != RISCVISD::SplitF64) {
1155 assert((!Subtarget->is64Bit() || Opcode == RISCVISD::SplitGPRPair) &&
1156 "SplitF64 only handled here on rv32i_zdinx");
1157
1159 SDValue Lo = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_even, DL,
1160 Node->getValueType(0),
1161 Node->getOperand(0));
1163 }
1164
1167 RISCV::sub_gpr_odd, DL, Node->getValueType(1), Node->getOperand(0));
1169 }
1170
1172 return;
1173 }
1174
1175 assert(Opcode != RISCVISD::SplitGPRPair &&
1176 "SplitGPRPair should already be handled");
1177
1178 if (!Subtarget->hasStdExtZfa())
1179 break;
1180 assert(Subtarget->hasStdExtD() && !Subtarget->is64Bit() &&
1181 "Unexpected subtarget");
1182
1183
1185 SDNode *Lo = CurDAG->getMachineNode(RISCV::FMV_X_W_FPR64, DL, VT,
1186 Node->getOperand(0));
1188 }
1190 SDNode *Hi = CurDAG->getMachineNode(RISCV::FMVH_X_D, DL, VT,
1191 Node->getOperand(0));
1193 }
1194
1196 return;
1197 }
1200 if (!N1C)
1201 break;
1205 break;
1206 unsigned ShAmt = N1C->getZExtValue();
1208
1210 unsigned XLen = Subtarget->getXLen();
1213 if (ShAmt <= 32 && TrailingZeros > 0 && LeadingZeros == 32) {
1214
1215
1218 CurDAG->getTargetConstant(TrailingZeros, DL, VT));
1220 RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
1221 CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
1223 return;
1224 }
1225 if (TrailingZeros == 0 && LeadingZeros > ShAmt &&
1226 XLen - LeadingZeros > 11 && LeadingZeros != 32) {
1227
1228
1229
1230
1231
1232
1233
1234
1237 CurDAG->getTargetConstant(LeadingZeros, DL, VT));
1239 RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
1240 CurDAG->getTargetConstant(LeadingZeros - ShAmt, DL, VT));
1242 return;
1243 }
1244 }
1245 break;
1246 }
1249 if (!N1C)
1250 break;
1253 break;
1254 unsigned ShAmt = N1C->getZExtValue();
1256
1257
1258
1260 unsigned XLen = Subtarget->getXLen();
1263 if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
1266 CurDAG->getTargetConstant(TrailingZeros, DL, VT));
1268 RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
1269 CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
1271 return;
1272 }
1273 }
1274
1275
1276
1277
1278
1279
1280
1281
1284 break;
1286 if (ShAmt >= TrailingOnes)
1287 break;
1288
1289 if (TrailingOnes == 32) {
1291 Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
1294 return;
1295 }
1296
1297
1299 break;
1300
1301
1302 if (HasBitTest && ShAmt + 1 == TrailingOnes) {
1304 Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
1307 return;
1308 }
1309
1310 const unsigned Msb = TrailingOnes - 1;
1311 const unsigned Lsb = ShAmt;
1313 return;
1314
1315 unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
1318 CurDAG->getTargetConstant(LShAmt, DL, VT));
1320 RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
1321 CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
1323 return;
1324 }
1327 return;
1328
1330 return;
1331
1332
1333
1334
1335
1336
1337
1338
1339
1341 if (!N1C)
1342 break;
1345 break;
1346 unsigned ShAmt = N1C->getZExtValue();
1347 unsigned ExtSize =
1349
1350 if (ExtSize >= 32 || ShAmt >= ExtSize)
1351 break;
1352 unsigned LShAmt = Subtarget->getXLen() - ExtSize;
1355 CurDAG->getTargetConstant(LShAmt, DL, VT));
1357 RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
1358 CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
1360 return;
1361 }
1364 return;
1365
1366 break;
1367 }
1370 return;
1371
1372 break;
1375 if (!N1C)
1376 break;
1377
1379
    if (!C)
      break;
1385 unsigned C2 = C->getZExtValue();
1386 unsigned XLen = Subtarget->getXLen();
1387 assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
1388
1389
1390
1391
1392
1393
1394
1395 bool IsCANDI = isInt<6>(N1C->getSExtValue());
1396
1397 uint64_t C1 = N1C->getZExtValue();
1398
1399
1400 if (LeftShift)
1402 else
1404
1405
1406
1407 bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
1408
1410
1411
1412
1413 if (!LeftShift && isMask_64(C1)) {
1415 if (C2 < Leading) {
1416
1417 if (C2 + 32 == Leading) {
1419 RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
1421 return;
1422 }
1423
1424
1425
1426
1427
1428
1429 if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
1433 CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
1434 CurDAG->getTargetConstant(31, DL, VT));
1436 RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
1437 CurDAG->getTargetConstant(Leading - 32, DL, VT));
1439 return;
1440 }
1441
1442
1443
1444
1445
1446
1447
1448
1450 const unsigned Lsb = C2;
1452 return;
1453
1454
1455
1456 bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
1459
1460 Skip |= HasBitTest && Leading == XLen - 1;
1461 if (OneUseOrZExtW && !Skip) {
1464 CurDAG->getTargetConstant(Leading - C2, DL, VT));
1466 RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
1467 CurDAG->getTargetConstant(Leading, DL, VT));
1469 return;
1470 }
1471 }
1472 }
1473
1474
1475
1478
1479 if (C2 + Leading < XLen &&
1481
1482 if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
1484 CurDAG->getMachineNode(RISCV::SLLI_UW, DL, VT, X,
1485 CurDAG->getTargetConstant(C2, DL, VT));
1487 return;
1488 }
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498 const unsigned Msb = XLen - Leading - 1;
1499 const unsigned Lsb = C2;
1501 return;
1502
1503 if (OneUseOrZExtW && !IsCANDI) {
1504
1505 if (Subtarget->hasStdExtZbkb() && C1 == 0xff00 && C2 == 8) {
1507 RISCV::PACKH, DL, VT,
1508 CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT()), X);
1510 return;
1511 }
1512
1515 CurDAG->getTargetConstant(C2 + Leading, DL, VT));
1517 RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
1518 CurDAG->getTargetConstant(Leading, DL, VT));
1520 return;
1521 }
1522 }
1523 }
1524
1525
1526
1530 if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
1531 !IsCANDI) {
1532 unsigned SrliOpc = RISCV::SRLI;
1533
1534 if (X.getOpcode() == ISD::AND &&
1536 X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
1537 SrliOpc = RISCV::SRLIW;
1539 }
1542 CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
1544 RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
1545 CurDAG->getTargetConstant(Trailing, DL, VT));
1547 return;
1548 }
1549
1550 if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
1551 OneUseOrZExtW && !IsCANDI) {
1554 CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
1556 RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
1557 CurDAG->getTargetConstant(Trailing, DL, VT));
1559 return;
1560 }
1561
1562 if (Trailing > 0 && Leading + Trailing == 32 && C2 + Trailing < XLen &&
1563 OneUseOrZExtW && Subtarget->hasStdExtZba()) {
1566 CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
1568 RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
1569 CurDAG->getTargetConstant(Trailing, DL, VT));
1571 return;
1572 }
1573 }
1574
1575
1576
1580 if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
1583 CurDAG->getTargetConstant(Trailing - C2, DL, VT));
1585 RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
1586 CurDAG->getTargetConstant(Trailing, DL, VT));
1588 return;
1589 }
1590
1591 if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
1594 CurDAG->getTargetConstant(Trailing - C2, DL, VT));
1596 RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
1597 CurDAG->getTargetConstant(Trailing, DL, VT));
1599 return;
1600 }
1601
1602
1603 if (C2 < Trailing && Leading + Trailing == 32 && OneUseOrZExtW &&
1604 Subtarget->hasStdExtZba()) {
1607 CurDAG->getTargetConstant(Trailing - C2, DL, VT));
1609 RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
1610 CurDAG->getTargetConstant(Trailing, DL, VT));
1612 return;
1613 }
1614 }
1615 }
1616
1617 const uint64_t C1 = N1C->getZExtValue();
1618
1622 unsigned XLen = Subtarget->getXLen();
1623 assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
1624
1626
1627
1628 bool Skip = C2 > 32 && isInt<12>(N1C->getSExtValue()) &&
1631 X.getConstantOperandVal(1) == 32;
1632
1633
1636 if (C2 > Leading) {
1639 CurDAG->getTargetConstant(C2 - Leading, DL, VT));
1641 RISCV::SRLI, DL, VT, SDValue(SRAI, 0),
1642 CurDAG->getTargetConstant(Leading, DL, VT));
1644 return;
1645 }
1646 }
1647
1648
1649
1650
1654 if (C2 > Leading && Leading > 0 && Trailing > 0) {
1657 CurDAG->getTargetConstant(C2 - Leading, DL, VT));
1659 RISCV::SRLI, DL, VT, SDValue(SRAI, 0),
1660 CurDAG->getTargetConstant(Leading + Trailing, DL, VT));
1662 RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
1663 CurDAG->getTargetConstant(Trailing, DL, VT));
1665 return;
1666 }
1667 }
1668 }
1669
1670
1671
1672
1673
1674
1676 !(C1 == 0xffff && Subtarget->hasStdExtZbb()) &&
1677 !(C1 == 0xffffffff && Subtarget->hasStdExtZba())) {
1680 return;
1681 }
1682
1684 return;
1685
1686 break;
1687 }
1689
1690
1691
1692
1693
1694
1695
1697 if (!N1C || !N1C->hasOneUse())
1698 break;
1699
1700
1703 break;
1704
1706
1707
1709 break;
1710
1711
1712
1713
1714
1715
1716 bool IsANDIOrZExt =
1718 (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
1719
1720 IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
1721 if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
1722 break;
1723
1724
1725 bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
1726
1727 IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
1729 break;
1730
1731
1732
1733
1734 unsigned XLen = Subtarget->getXLen();
1736
1737
1738
1739 uint64_t C1 = N1C->getZExtValue();
1740 unsigned ConstantShift = XLen - LeadingZeros;
1742 break;
1743
1744 uint64_t ShiftedC1 = C1 << ConstantShift;
1745
1746 if (XLen == 32)
1748
1749
1753 CurDAG->getTargetConstant(LeadingZeros, DL, VT));
1754 SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
1757 return;
1758 }
1759 case ISD::LOAD: {
1761 return;
1762
1763 if (Subtarget->hasVendorXCVmem() && !Subtarget->is64Bit()) {
1764
1767 break;
1768
1772
1773 bool Simm12 = false;
1774 bool SignExtend = Load->getExtensionType() == ISD::SEXTLOAD;
1775
1777 int ConstantVal = ConstantOffset->getSExtValue();
1778 Simm12 = isInt<12>(ConstantVal);
1779 if (Simm12)
1781 Offset.getValueType());
1782 }
1783
1784 unsigned Opcode = 0;
1785 switch (Load->getMemoryVT().getSimpleVT().SimpleTy) {
1786 case MVT::i8:
1787 if (Simm12 && SignExtend)
1788 Opcode = RISCV::CV_LB_ri_inc;
1789 else if (Simm12 && !SignExtend)
1790 Opcode = RISCV::CV_LBU_ri_inc;
1791 else if (!Simm12 && SignExtend)
1792 Opcode = RISCV::CV_LB_rr_inc;
1793 else
1794 Opcode = RISCV::CV_LBU_rr_inc;
1795 break;
1796 case MVT::i16:
1797 if (Simm12 && SignExtend)
1798 Opcode = RISCV::CV_LH_ri_inc;
1799 else if (Simm12 && !SignExtend)
1800 Opcode = RISCV::CV_LHU_ri_inc;
1801 else if (!Simm12 && SignExtend)
1802 Opcode = RISCV::CV_LH_rr_inc;
1803 else
1804 Opcode = RISCV::CV_LHU_rr_inc;
1805 break;
1806 case MVT::i32:
1807 if (Simm12)
1808 Opcode = RISCV::CV_LW_ri_inc;
1809 else
1810 Opcode = RISCV::CV_LW_rr_inc;
1811 break;
1812 default:
1813 break;
1814 }
1815 if (!Opcode)
1816 break;
1817
1821 return;
1822 }
1823 break;
1824 }
1825 case RISCVISD::LD_RV32: {
1826 assert(Subtarget->hasStdExtZilsd() && "LD_RV32 is only used with Zilsd");
1827
1832
1835 RISCV::LD_RV32, DL, {MVT::Untyped, MVT::Other}, Ops);
1836 SDValue Lo = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_even, DL,
1837 MVT::i32, SDValue(New, 0));
1838 SDValue Hi = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_odd, DL,
1839 MVT::i32, SDValue(New, 0));
1845 return;
1846 }
1847 case RISCVISD::SD_RV32: {
1852
1855
1857
1859 RegPair = CurDAG->getRegister(RISCV::X0_Pair, MVT::Untyped);
1860 } else {
1862 CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32), Lo,
1863 CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32), Hi,
1864 CurDAG->getTargetConstant(RISCV::sub_gpr_odd, DL, MVT::i32)};
1865
1866 RegPair = SDValue(CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
1867 MVT::Untyped, Ops),
1868 0);
1869 }
1870
1876 return;
1877 }
1878 case RISCVISD::PPACK_DH: {
1879 assert(Subtarget->enablePExtCodeGen() && Subtarget->isRV32());
1880
1885
1887 CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32), Val0,
1888 CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32), Val2,
1889 CurDAG->getTargetConstant(RISCV::sub_gpr_odd, DL, MVT::i32)};
1891 SDValue(CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
1892 MVT::Untyped, Ops),
1893 0);
1895 CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32), Val1,
1896 CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32), Val3,
1897 CurDAG->getTargetConstant(RISCV::sub_gpr_odd, DL, MVT::i32)};
1899 SDValue(CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
1900 MVT::Untyped, Ops1),
1901 0);
1902
1904 RISCV::PPAIRE_DB, DL, MVT::Untyped, {RegPair0, RegPair1});
1905
1906 SDValue Lo = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_even, DL,
1907 MVT::i32, SDValue(PackDH, 0));
1908 SDValue Hi = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_odd, DL,
1909 MVT::i32, SDValue(PackDH, 0));
1913 return;
1914 }
1916 unsigned IntNo = Node->getConstantOperandVal(0);
1917 switch (IntNo) {
1918
1919 default:
1920 break;
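    // There is no vmsge{u}.vx instruction; following the V specification it is
    // expanded as vmslt{u}.vx followed by vmnand.mm on the result. Comparing
    // against the minimum representable value degenerates to vmset.m, and a
    // comparison against a suitable constant can instead use vmsgt{u}.vx with
    // the constant minus one.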
1921 case Intrinsic::riscv_vmsgeu:
1922 case Intrinsic::riscv_vmsge: {
1925 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
1926 bool IsCmpConstant = false;
1927 bool IsCmpMinimum = false;
1928
1930 break;
1931
1932 int64_t CVal = 0;
1935 IsCmpConstant = true;
1936 CVal = C->getSExtValue();
1937 if (CVal >= -15 && CVal <= 16) {
1938 if (!IsUnsigned || CVal != 0)
1939 break;
1940 IsCmpMinimum = true;
1943 .getSExtValue()) {
1944 IsCmpMinimum = true;
1945 }
1946 }
1947 unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode, VMSGTOpcode;
1949 default:
1951#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
1952 case RISCVVType::lmulenum: \
1953 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
1954 : RISCV::PseudoVMSLT_VX_##suffix; \
1955 VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix \
1956 : RISCV::PseudoVMSGT_VX_##suffix; \
1957 break;
1965#undef CASE_VMSLT_OPCODES
1966 }
1967
1969 default:
1971#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix) \
1972 case RISCVVType::lmulenum: \
1973 VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
1974 VMSetOpcode = RISCV::PseudoVMSET_M_##suffix; \
1975 break;
1983#undef CASE_VMNAND_VMSET_OPCODES
1984 }
1987 SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
1990
1991
1992 if (IsCmpMinimum) {
1994 CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, MaskSEW));
1995 return;
1996 }
1997
1998 if (IsCmpConstant) {
2001
2003 {Src1, Imm, VL, SEW}));
2004 return;
2005 }
2006
2007
2008
2010 CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
2011 0);
2013 {Cmp, Cmp, VL, MaskSEW}));
2014 return;
2015 }
2016 case Intrinsic::riscv_vmsgeu_mask:
2017 case Intrinsic::riscv_vmsge_mask: {
2020 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
2021 bool IsCmpConstant = false;
2022 bool IsCmpMinimum = false;
2023
2025 break;
2026
2028 int64_t CVal = 0;
2030 IsCmpConstant = true;
2031 CVal = C->getSExtValue();
2032 if (CVal >= -15 && CVal <= 16) {
2033 if (!IsUnsigned || CVal != 0)
2034 break;
2035 IsCmpMinimum = true;
2038 .getSExtValue()) {
2039 IsCmpMinimum = true;
2040 }
2041 }
2042 unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
2043 VMOROpcode, VMSGTMaskOpcode;
2045 default:
2047#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
2048 case RISCVVType::lmulenum: \
2049 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
2050 : RISCV::PseudoVMSLT_VX_##suffix; \
2051 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
2052 : RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
2053 VMSGTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix##_MASK \
2054 : RISCV::PseudoVMSGT_VX_##suffix##_MASK; \
2055 break;
2063#undef CASE_VMSLT_OPCODES
2064 }
2065
2067 default:
2069#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
2070 case RISCVVType::lmulenum: \
2071 VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
2072 VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
2073 VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
2074 break;
2082#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
2083 }
2086 SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
2089 SDValue MaskedOff = Node->getOperand(1);
2091
2092
2093 if (IsCmpMinimum) {
2094
2095
2096 if (Mask == MaskedOff) {
2098 return;
2099 }
2101 CurDAG->getMachineNode(VMOROpcode, DL, VT,
2102 {Mask, MaskedOff, VL, MaskSEW}));
2103 return;
2104 }
2105
2106
2107
2108
2109 if (Mask == MaskedOff) {
2111 CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
2112 0);
2114 {Mask, Cmp, VL, MaskSEW}));
2115 return;
2116 }
2117
2120
2121 if (IsCmpConstant) {
2124
2126 VMSGTMaskOpcode, DL, VT,
2127 {MaskedOff, Src1, Imm, Mask, VL, SEW, PolicyOp}));
2128 return;
2129 }
2130
2131
2132
2133
2134
2135
2137 {MaskedOff, Src1, Src2, Mask,
2138 VL, SEW, PolicyOp}),
2139 0);
2140
2142 {Cmp, Mask, VL, MaskSEW}));
2143 return;
2144 }
2145 case Intrinsic::riscv_vsetvli:
2146 case Intrinsic::riscv_vsetvlimax:
2148 case Intrinsic::riscv_sf_vsettnt:
2149 case Intrinsic::riscv_sf_vsettm:
2150 case Intrinsic::riscv_sf_vsettk:
2152 }
2153 break;
2154 }
2156 unsigned IntNo = Node->getConstantOperandVal(1);
2157 switch (IntNo) {
2158
2159 default:
2160 break;
2161 case Intrinsic::riscv_vlseg2:
2162 case Intrinsic::riscv_vlseg3:
2163 case Intrinsic::riscv_vlseg4:
2164 case Intrinsic::riscv_vlseg5:
2165 case Intrinsic::riscv_vlseg6:
2166 case Intrinsic::riscv_vlseg7:
2167 case Intrinsic::riscv_vlseg8: {
2169 false);
2170 return;
2171 }
2172 case Intrinsic::riscv_vlseg2_mask:
2173 case Intrinsic::riscv_vlseg3_mask:
2174 case Intrinsic::riscv_vlseg4_mask:
2175 case Intrinsic::riscv_vlseg5_mask:
2176 case Intrinsic::riscv_vlseg6_mask:
2177 case Intrinsic::riscv_vlseg7_mask:
2178 case Intrinsic::riscv_vlseg8_mask: {
2180 false);
2181 return;
2182 }
2183 case Intrinsic::riscv_vlsseg2:
2184 case Intrinsic::riscv_vlsseg3:
2185 case Intrinsic::riscv_vlsseg4:
2186 case Intrinsic::riscv_vlsseg5:
2187 case Intrinsic::riscv_vlsseg6:
2188 case Intrinsic::riscv_vlsseg7:
2189 case Intrinsic::riscv_vlsseg8: {
2191 true);
2192 return;
2193 }
2194 case Intrinsic::riscv_vlsseg2_mask:
2195 case Intrinsic::riscv_vlsseg3_mask:
2196 case Intrinsic::riscv_vlsseg4_mask:
2197 case Intrinsic::riscv_vlsseg5_mask:
2198 case Intrinsic::riscv_vlsseg6_mask:
2199 case Intrinsic::riscv_vlsseg7_mask:
2200 case Intrinsic::riscv_vlsseg8_mask: {
2202 true);
2203 return;
2204 }
2205 case Intrinsic::riscv_vloxseg2:
2206 case Intrinsic::riscv_vloxseg3:
2207 case Intrinsic::riscv_vloxseg4:
2208 case Intrinsic::riscv_vloxseg5:
2209 case Intrinsic::riscv_vloxseg6:
2210 case Intrinsic::riscv_vloxseg7:
2211 case Intrinsic::riscv_vloxseg8:
2213 true);
2214 return;
2215 case Intrinsic::riscv_vluxseg2:
2216 case Intrinsic::riscv_vluxseg3:
2217 case Intrinsic::riscv_vluxseg4:
2218 case Intrinsic::riscv_vluxseg5:
2219 case Intrinsic::riscv_vluxseg6:
2220 case Intrinsic::riscv_vluxseg7:
2221 case Intrinsic::riscv_vluxseg8:
2223 false);
2224 return;
2225 case Intrinsic::riscv_vloxseg2_mask:
2226 case Intrinsic::riscv_vloxseg3_mask:
2227 case Intrinsic::riscv_vloxseg4_mask:
2228 case Intrinsic::riscv_vloxseg5_mask:
2229 case Intrinsic::riscv_vloxseg6_mask:
2230 case Intrinsic::riscv_vloxseg7_mask:
2231 case Intrinsic::riscv_vloxseg8_mask:
2233 true);
2234 return;
2235 case Intrinsic::riscv_vluxseg2_mask:
2236 case Intrinsic::riscv_vluxseg3_mask:
2237 case Intrinsic::riscv_vluxseg4_mask:
2238 case Intrinsic::riscv_vluxseg5_mask:
2239 case Intrinsic::riscv_vluxseg6_mask:
2240 case Intrinsic::riscv_vluxseg7_mask:
2241 case Intrinsic::riscv_vluxseg8_mask:
2243 false);
2244 return;
2245 case Intrinsic::riscv_vlseg8ff:
2246 case Intrinsic::riscv_vlseg7ff:
2247 case Intrinsic::riscv_vlseg6ff:
2248 case Intrinsic::riscv_vlseg5ff:
2249 case Intrinsic::riscv_vlseg4ff:
2250 case Intrinsic::riscv_vlseg3ff:
2251 case Intrinsic::riscv_vlseg2ff: {
2253 return;
2254 }
2255 case Intrinsic::riscv_vlseg8ff_mask:
2256 case Intrinsic::riscv_vlseg7ff_mask:
2257 case Intrinsic::riscv_vlseg6ff_mask:
2258 case Intrinsic::riscv_vlseg5ff_mask:
2259 case Intrinsic::riscv_vlseg4ff_mask:
2260 case Intrinsic::riscv_vlseg3ff_mask:
2261 case Intrinsic::riscv_vlseg2ff_mask: {
2263 return;
2264 }
2265 case Intrinsic::riscv_vloxei:
2266 case Intrinsic::riscv_vloxei_mask:
2267 case Intrinsic::riscv_vluxei:
2268 case Intrinsic::riscv_vluxei_mask: {
2269 bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
2270 IntNo == Intrinsic::riscv_vluxei_mask;
2271 bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
2272 IntNo == Intrinsic::riscv_vloxei_mask;
2273
2274 MVT VT = Node->getSimpleValueType(0);
2276
2277 unsigned CurOp = 2;
2280
2281 MVT IndexVT;
2283 true, Operands,
2284 true, &IndexVT);
2285
2287 "Element count mismatch");
2288
2292 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
2294 "index values when XLEN=32");
2295 }
2297 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
2298 static_cast<unsigned>(IndexLMUL));
2300 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
2301
2303
2305 return;
2306 }
2307 case Intrinsic::riscv_vlm:
2308 case Intrinsic::riscv_vle:
2309 case Intrinsic::riscv_vle_mask:
2310 case Intrinsic::riscv_vlse:
2311 case Intrinsic::riscv_vlse_mask: {
2312 bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
2313 IntNo == Intrinsic::riscv_vlse_mask;
2314 bool IsStrided =
2315 IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
2316
2317 MVT VT = Node->getSimpleValueType(0);
2319
2320
2321
2322
2323
2324
2325
2326 bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
2327 unsigned CurOp = 2;
2329 if (HasPassthruOperand)
2331 else {
2332
2333
2335 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
2337 }
2339 Operands, true);
2340
2343 RISCV::getVLEPseudo(IsMasked, IsStrided, false, Log2SEW,
2344 static_cast<unsigned>(LMUL));
2346 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
2347
2349
2351 return;
2352 }
2353 case Intrinsic::riscv_vleff:
2354 case Intrinsic::riscv_vleff_mask: {
2355 bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
2356
2357 MVT VT = Node->getSimpleValueType(0);
2359
2360 unsigned CurOp = 2;
2364 false, Operands,
2365 true);
2366
2369 RISCV::getVLEPseudo(IsMasked, false, true,
2370 Log2SEW, static_cast<unsigned>(LMUL));
2372 P->Pseudo, DL, Node->getVTList(), Operands);
2374
2376 return;
2377 }
2378 case Intrinsic::riscv_nds_vln:
2379 case Intrinsic::riscv_nds_vln_mask:
2380 case Intrinsic::riscv_nds_vlnu:
2381 case Intrinsic::riscv_nds_vlnu_mask: {
2382 bool IsMasked = IntNo == Intrinsic::riscv_nds_vln_mask ||
2383 IntNo == Intrinsic::riscv_nds_vlnu_mask;
2384 bool IsUnsigned = IntNo == Intrinsic::riscv_nds_vlnu ||
2385 IntNo == Intrinsic::riscv_nds_vlnu_mask;
2386
2387 MVT VT = Node->getSimpleValueType(0);
2389 unsigned CurOp = 2;
2391
2394 false, Operands,
2395 true);
2396
2399 IsMasked, IsUnsigned, Log2SEW, static_cast<unsigned>(LMUL));
2401 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
2402
2404 CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
2405
2407 return;
2408 }
2409 }
2410 break;
2411 }
2413 unsigned IntNo = Node->getConstantOperandVal(1);
2414 switch (IntNo) {
2415 case Intrinsic::riscv_vsseg2:
2416 case Intrinsic::riscv_vsseg3:
2417 case Intrinsic::riscv_vsseg4:
2418 case Intrinsic::riscv_vsseg5:
2419 case Intrinsic::riscv_vsseg6:
2420 case Intrinsic::riscv_vsseg7:
2421 case Intrinsic::riscv_vsseg8: {
2423 false);
2424 return;
2425 }
2426 case Intrinsic::riscv_vsseg2_mask:
2427 case Intrinsic::riscv_vsseg3_mask:
2428 case Intrinsic::riscv_vsseg4_mask:
2429 case Intrinsic::riscv_vsseg5_mask:
2430 case Intrinsic::riscv_vsseg6_mask:
2431 case Intrinsic::riscv_vsseg7_mask:
2432 case Intrinsic::riscv_vsseg8_mask: {
2434 false);
2435 return;
2436 }
2437 case Intrinsic::riscv_vssseg2:
2438 case Intrinsic::riscv_vssseg3:
2439 case Intrinsic::riscv_vssseg4:
2440 case Intrinsic::riscv_vssseg5:
2441 case Intrinsic::riscv_vssseg6:
2442 case Intrinsic::riscv_vssseg7:
2443 case Intrinsic::riscv_vssseg8: {
2445 true);
2446 return;
2447 }
2448 case Intrinsic::riscv_vssseg2_mask:
2449 case Intrinsic::riscv_vssseg3_mask:
2450 case Intrinsic::riscv_vssseg4_mask:
2451 case Intrinsic::riscv_vssseg5_mask:
2452 case Intrinsic::riscv_vssseg6_mask:
2453 case Intrinsic::riscv_vssseg7_mask:
2454 case Intrinsic::riscv_vssseg8_mask: {
2456 true);
2457 return;
2458 }
2459 case Intrinsic::riscv_vsoxseg2:
2460 case Intrinsic::riscv_vsoxseg3:
2461 case Intrinsic::riscv_vsoxseg4:
2462 case Intrinsic::riscv_vsoxseg5:
2463 case Intrinsic::riscv_vsoxseg6:
2464 case Intrinsic::riscv_vsoxseg7:
2465 case Intrinsic::riscv_vsoxseg8:
2467 true);
2468 return;
2469 case Intrinsic::riscv_vsuxseg2:
2470 case Intrinsic::riscv_vsuxseg3:
2471 case Intrinsic::riscv_vsuxseg4:
2472 case Intrinsic::riscv_vsuxseg5:
2473 case Intrinsic::riscv_vsuxseg6:
2474 case Intrinsic::riscv_vsuxseg7:
2475 case Intrinsic::riscv_vsuxseg8:
2477 false);
2478 return;
2479 case Intrinsic::riscv_vsoxseg2_mask:
2480 case Intrinsic::riscv_vsoxseg3_mask:
2481 case Intrinsic::riscv_vsoxseg4_mask:
2482 case Intrinsic::riscv_vsoxseg5_mask:
2483 case Intrinsic::riscv_vsoxseg6_mask:
2484 case Intrinsic::riscv_vsoxseg7_mask:
2485 case Intrinsic::riscv_vsoxseg8_mask:
2487 true);
2488 return;
2489 case Intrinsic::riscv_vsuxseg2_mask:
2490 case Intrinsic::riscv_vsuxseg3_mask:
2491 case Intrinsic::riscv_vsuxseg4_mask:
2492 case Intrinsic::riscv_vsuxseg5_mask:
2493 case Intrinsic::riscv_vsuxseg6_mask:
2494 case Intrinsic::riscv_vsuxseg7_mask:
2495 case Intrinsic::riscv_vsuxseg8_mask:
2497 false);
2498 return;
2499 case Intrinsic::riscv_vsoxei:
2500 case Intrinsic::riscv_vsoxei_mask:
2501 case Intrinsic::riscv_vsuxei:
2502 case Intrinsic::riscv_vsuxei_mask: {
2503 bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
2504 IntNo == Intrinsic::riscv_vsuxei_mask;
2505 bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
2506 IntNo == Intrinsic::riscv_vsoxei_mask;
2507
2508 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
2510
2511 unsigned CurOp = 2;
2513 Operands.push_back(Node->getOperand(CurOp++));
2514
2515 MVT IndexVT;
2517 true, Operands,
2518 false, &IndexVT);
2519
2521 "Element count mismatch");
2522
2526 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
2528 "index values when XLEN=32");
2529 }
2531 IsMasked, IsOrdered, IndexLog2EEW,
2532 static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
2534 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
2535
2537
2539 return;
2540 }
2541 case Intrinsic::riscv_vsm:
2542 case Intrinsic::riscv_vse:
2543 case Intrinsic::riscv_vse_mask:
2544 case Intrinsic::riscv_vsse:
2545 case Intrinsic::riscv_vsse_mask: {
2546 bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
2547 IntNo == Intrinsic::riscv_vsse_mask;
2548 bool IsStrided =
2549 IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
2550
2551 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
2553
2554 unsigned CurOp = 2;
2556 Operands.push_back(Node->getOperand(CurOp++));
2557
2559 Operands);
2560
2563 IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
2565 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
2567
2569 return;
2570 }
2571 case Intrinsic::riscv_sf_vc_x_se:
2572 case Intrinsic::riscv_sf_vc_i_se:
2574 return;
2575 case Intrinsic::riscv_sf_vlte8:
2576 case Intrinsic::riscv_sf_vlte16:
2577 case Intrinsic::riscv_sf_vlte32:
2578 case Intrinsic::riscv_sf_vlte64: {
2579 unsigned Log2SEW;
2580 unsigned PseudoInst;
2581 switch (IntNo) {
2582 case Intrinsic::riscv_sf_vlte8:
2583 PseudoInst = RISCV::PseudoSF_VLTE8;
2584 Log2SEW = 3;
2585 break;
2586 case Intrinsic::riscv_sf_vlte16:
2587 PseudoInst = RISCV::PseudoSF_VLTE16;
2588 Log2SEW = 4;
2589 break;
2590 case Intrinsic::riscv_sf_vlte32:
2591 PseudoInst = RISCV::PseudoSF_VLTE32;
2592 Log2SEW = 5;
2593 break;
2594 case Intrinsic::riscv_sf_vlte64:
2595 PseudoInst = RISCV::PseudoSF_VLTE64;
2596 Log2SEW = 6;
2597 break;
2598 }
2599
2600 SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
2601 SDValue TWidenOp = CurDAG->getTargetConstant(1, DL, XLenVT);
2602 SDValue Operands[] = {Node->getOperand(2),
2603 Node->getOperand(3),
2604 Node->getOperand(4),
2605 SEWOp,
2606 TWidenOp,
2607 Node->getOperand(0)};
2608
2610 CurDAG->getMachineNode(PseudoInst, DL, Node->getVTList(), Operands);
2612 CurDAG->setNodeMemRefs(TileLoad, {MemOp->getMemOperand()});
2613
2615 return;
2616 }
2617 case Intrinsic::riscv_sf_mm_s_s:
2618 case Intrinsic::riscv_sf_mm_s_u:
2619 case Intrinsic::riscv_sf_mm_u_s:
2620 case Intrinsic::riscv_sf_mm_u_u:
2621 case Intrinsic::riscv_sf_mm_e5m2_e5m2:
2622 case Intrinsic::riscv_sf_mm_e5m2_e4m3:
2623 case Intrinsic::riscv_sf_mm_e4m3_e5m2:
2624 case Intrinsic::riscv_sf_mm_e4m3_e4m3:
2625 case Intrinsic::riscv_sf_mm_f_f: {
2626 bool HasFRM = false;
2627 unsigned PseudoInst;
2628 switch (IntNo) {
2629 case Intrinsic::riscv_sf_mm_s_s:
2630 PseudoInst = RISCV::PseudoSF_MM_S_S;
2631 break;
2632 case Intrinsic::riscv_sf_mm_s_u:
2633 PseudoInst = RISCV::PseudoSF_MM_S_U;
2634 break;
2635 case Intrinsic::riscv_sf_mm_u_s:
2636 PseudoInst = RISCV::PseudoSF_MM_U_S;
2637 break;
2638 case Intrinsic::riscv_sf_mm_u_u:
2639 PseudoInst = RISCV::PseudoSF_MM_U_U;
2640 break;
2641 case Intrinsic::riscv_sf_mm_e5m2_e5m2:
2642 PseudoInst = RISCV::PseudoSF_MM_E5M2_E5M2;
2643 HasFRM = true;
2644 break;
2645 case Intrinsic::riscv_sf_mm_e5m2_e4m3:
2646 PseudoInst = RISCV::PseudoSF_MM_E5M2_E4M3;
2647 HasFRM = true;
2648 break;
2649 case Intrinsic::riscv_sf_mm_e4m3_e5m2:
2650 PseudoInst = RISCV::PseudoSF_MM_E4M3_E5M2;
2651 HasFRM = true;
2652 break;
2653 case Intrinsic::riscv_sf_mm_e4m3_e4m3:
2654 PseudoInst = RISCV::PseudoSF_MM_E4M3_E4M3;
2655 HasFRM = true;
2656 break;
2657 case Intrinsic::riscv_sf_mm_f_f:
2658 if (Node->getOperand(3).getValueType().getScalarType() == MVT::bf16)
2659 PseudoInst = RISCV::PseudoSF_MM_F_F_ALT;
2660 else
2661 PseudoInst = RISCV::PseudoSF_MM_F_F;
2662 HasFRM = true;
2663 break;
2664 }
2665 uint64_t TileNum = Node->getConstantOperandVal(2);
2675
2676
2677 if (IntNo == Intrinsic::riscv_sf_mm_f_f && Log2SEW == 5 &&
2680
2682 {CurDAG->getRegister(getTileReg(TileNum), XLenVT), Op1, Op2});
2683 if (HasFRM)
2686 Operands.append({TmOp, TnOp, TkOp,
2687 CurDAG->getTargetConstant(Log2SEW, DL, XLenVT), TWidenOp,
2688 Chain});
2689
2690 auto *NewNode =
2691 CurDAG->getMachineNode(PseudoInst, DL, Node->getVTList(), Operands);
2692
2694 return;
2695 }
2696 case Intrinsic::riscv_sf_vtzero_t: {
2697 uint64_t TileNum = Node->getConstantOperandVal(2);
2703 auto *NewNode = CurDAG->getMachineNode(
2704 RISCV::PseudoSF_VTZERO_T, DL, Node->getVTList(),
2705 {CurDAG->getRegister(getTileReg(TileNum), XLenVT), Tm, Tn, Log2SEW,
2706 TWiden, Chain});
2707
2709 return;
2710 }
2711 }
2712 break;
2713 }
2714 case ISD::BITCAST: {
2715 MVT SrcVT = Node->getOperand(0).getSimpleValueType();
2716
2717
2722 return;
2723 }
2724 if (Subtarget->enablePExtCodeGen()) {
2725 bool Is32BitCast =
2726 (VT == MVT::i32 && (SrcVT == MVT::v4i8 || SrcVT == MVT::v2i16)) ||
2727 (SrcVT == MVT::i32 && (VT == MVT::v4i8 || VT == MVT::v2i16));
2728 bool Is64BitCast =
2729 (VT == MVT::i64 && (SrcVT == MVT::v8i8 || SrcVT == MVT::v4i16 ||
2730 SrcVT == MVT::v2i32)) ||
2731 (SrcVT == MVT::i64 &&
2732 (VT == MVT::v8i8 || VT == MVT::v4i16 || VT == MVT::v2i32));
2733 if (Is32BitCast || Is64BitCast) {
2736 return;
2737 }
2738 }
2739 break;
2740 }
2742 if (Subtarget->enablePExtCodeGen()) {
2743 MVT SrcVT = Node->getOperand(0).getSimpleValueType();
2744 if ((VT == MVT::v2i32 && SrcVT == MVT::i64) ||
2745 (VT == MVT::v4i8 && SrcVT == MVT::i32)) {
2748 return;
2749 }
2750 }
2751 break;
2753 case RISCVISD::TUPLE_INSERT: {
2757 auto Idx = Node->getConstantOperandVal(2);
2759
2761 MVT SubVecContainerVT = SubVecVT;
2762
2764 SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
2766 [[maybe_unused]] bool ExactlyVecRegSized =
2767 Subtarget->expandVScale(SubVecVT.getSizeInBits())
2768 .isKnownMultipleOf(Subtarget->expandVScale(VecRegSize));
2770 .getKnownMinValue()));
2771 assert(Idx == 0 && (ExactlyVecRegSized || V.isUndef()));
2772 }
2773 MVT ContainerVT = VT;
2775 ContainerVT = TLI.getContainerForFixedLengthVector(VT);
2776
2777 const auto *TRI = Subtarget->getRegisterInfo();
2778 unsigned SubRegIdx;
2779 std::tie(SubRegIdx, Idx) =
2781 ContainerVT, SubVecContainerVT, Idx, TRI);
2782
2783
2784
2785
2786 if (Idx != 0)
2787 break;
2788
2791 [[maybe_unused]] bool IsSubVecPartReg =
2795 assert((V.getValueType().isRISCVVectorTuple() || !IsSubVecPartReg ||
2796 V.isUndef()) &&
2797 "Expecting lowering to have created legal INSERT_SUBVECTORs when "
2798 "the subvector is smaller than a full-sized register");
2799
2800
2801
2802 if (SubRegIdx == RISCV::NoSubRegister) {
2803 unsigned InRegClassID =
2806 InRegClassID &&
2807 "Unexpected subvector extraction");
2808 SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
2809 SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
2810 DL, VT, SubV, RC);
2812 return;
2813 }
2814
2815 SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
2817 return;
2818 }
2820 case RISCVISD::TUPLE_EXTRACT: {
2822 auto Idx = Node->getConstantOperandVal(1);
2823 MVT InVT = V.getSimpleValueType();
2825
2827 MVT SubVecContainerVT = VT;
2828
2831 SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
2832 }
2834 InVT = TLI.getContainerForFixedLengthVector(InVT);
2835
2836 const auto *TRI = Subtarget->getRegisterInfo();
2837 unsigned SubRegIdx;
2838 std::tie(SubRegIdx, Idx) =
2840 InVT, SubVecContainerVT, Idx, TRI);
2841
2842
2843
2844
2845 if (Idx != 0)
2846 break;
2847
2848
2849
2850 if (SubRegIdx == RISCV::NoSubRegister) {
2853 InRegClassID &&
2854 "Unexpected subvector extraction");
2855 SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
2857 CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
2859 return;
2860 }
2861
2862 SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
2864 return;
2865 }
2866 case RISCVISD::VMV_S_X_VL:
2867 case RISCVISD::VFMV_S_F_VL:
2868 case RISCVISD::VMV_V_X_VL:
2869 case RISCVISD::VFMV_V_F_VL: {
2870
2871 bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
2872 Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
    if (!Node->getOperand(0).isUndef())
      break;
2877
2878
2879 if (!Ld || Ld->isIndexed())
2880 break;
2881 EVT MemVT = Ld->getMemoryVT();
2882
2884 break;
2887 break;
2888
2890 if (IsScalarMove) {
2891
2892
2894 break;
2896 } else
2898
2900 SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
2901
2902
2903
2905
2906
2907 if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
2908 break;
2909
2911 SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT), 0),
2912 Ld->getBasePtr()};
2913 if (IsStrided)
2914 Operands.push_back(CurDAG->getRegister(RISCV::X0, XLenVT));
2916 SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
2917 Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
2918
2921 false, IsStrided, false,
2922 Log2SEW, static_cast<unsigned>(LMUL));
2924 CurDAG->getMachineNode(P->Pseudo, DL, {VT, MVT::Other}, Operands);
2925
2927
2928 CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
2929
2931 return;
2932 }
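  // Lower locality values request stronger non-temporal behaviour: locality
  // 0/1/2 maps onto non-temporal level 3/1/0 in the memory operand flags
  // below, while locality 3 is left as an ordinary (temporal) prefetch for the
  // generic patterns.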
2933 case ISD::PREFETCH:
2934 unsigned Locality = Node->getConstantOperandVal(3);
2935 if (Locality > 2)
2936 break;
2937
2941
2942 int NontemporalLevel = 0;
2943 switch (Locality) {
2944 case 0:
2945 NontemporalLevel = 3;
2946 break;
2947 case 1:
2948 NontemporalLevel = 1;
2949 break;
2950 case 2:
2951 NontemporalLevel = 0;
2952 break;
2953 default:
2955 }
2956
2957 if (NontemporalLevel & 0b1)
2959 if (NontemporalLevel & 0b10)
2961 break;
2962 }
2963
2964
2965 SelectCode(Node);
2966}
2967
bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, InlineAsm::ConstraintCode ConstraintID,
    std::vector<SDValue> &OutOps) {
2971
2972
2973 switch (ConstraintID) {
2978 assert(Found && "SelectAddrRegImm should always succeed");
2979 OutOps.push_back(Op0);
2980 OutOps.push_back(Op1);
2981 return false;
2982 }
2984 OutOps.push_back(Op);
2985 OutOps.push_back(
2986 CurDAG->getTargetConstant(0, SDLoc(Op), Subtarget->getXLenVT()));
2987 return false;
2988 default:
2991 }
2992
2993 return true;
2994}
2995
bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
                                             SDValue &Offset) {
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
3000 Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
3001 return true;
3002 }
3003
3004 return false;
3005}
3006
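// Try to materialize a constant address as LUI+ADDI, or as a longer
// materialization sequence whose final ADDI can be folded into the memory
// access offset. When the address feeds a prefetch, the folded low offset must
// keep its low five bits clear because of the prefetch offset encoding.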
3007
3011 bool IsPrefetch = false) {
3013 return false;
3014
3016
3017
3018
3019
3023 if (IsPrefetch && (Lo12 & 0b11111) != 0)
3024 return false;
3025 if (Hi) {
3026 int64_t Hi20 = (Hi >> 12) & 0xfffff;
3030 0);
3031 } else {
3033 }
3035 return true;
3036 }
3037
3038
3040
3041
3042
3043 if (Seq.back().getOpcode() != RISCV::ADDI)
3044 return false;
3045 Lo12 = Seq.back().getImm();
3046 if (IsPrefetch && (Lo12 & 0b11111) != 0)
3047 return false;
3048
3049
3051 assert(!Seq.empty() && "Expected more instructions in sequence");
3052
3055 return true;
3056}
3057
3058
3059
3061 for (auto *User : Add->users()) {
3062 if (User->getOpcode() != ISD::LOAD && User->getOpcode() != ISD::STORE &&
3063 User->getOpcode() != RISCVISD::LD_RV32 &&
3064 User->getOpcode() != RISCVISD::SD_RV32 &&
3065 User->getOpcode() != ISD::ATOMIC_LOAD &&
3066 User->getOpcode() != ISD::ATOMIC_STORE)
3067 return false;
3069 if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 &&
3070 VT != MVT::f64)
3071 return false;
3072
3073 if (User->getOpcode() == ISD::STORE &&
3075 return false;
3076 if (User->getOpcode() == ISD::ATOMIC_STORE &&
3078 return false;
3079 if (User->getOpcode() == RISCVISD::SD_RV32 &&
3081 return false;
3083 return false;
3084 }
3085
3086 return true;
3087}
3088
3090 switch (User->getOpcode()) {
3091 default:
3092 return false;
3093 case ISD::LOAD:
3094 case RISCVISD::LD_RV32:
3095 case ISD::ATOMIC_LOAD:
3096 break;
3097 case ISD::STORE:
3098
3100 return false;
3101 break;
3102 case RISCVISD::SD_RV32:
3103
3105 return false;
3106 break;
3107 case ISD::ATOMIC_STORE:
3108
3110 return false;
3111 break;
3112 }
3113
3114 return true;
3115}
3116
3117
3118
3119
3121 Align Alignment) {
3123 for (auto *User : Addr->users()) {
3124
3125
3127 continue;
3128
3131 if ((CVal) || Alignment <= CVal)
3132 return false;
3133
3134
3135 for (auto *AddUser : User->users())
3137 return false;
3138
3139 continue;
3140 }
3141
3142 return false;
3143 }
3144
3145 return true;
3146}
3147
3151 return true;
3152
3155
3156 if (Addr.getOpcode() == RISCVISD::ADD_LO) {
3157 bool CanFold = true;
3158
3159
3163 GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
3165 CanFold = false;
3166 }
3167 if (CanFold) {
3170 return true;
3171 }
3172 }
3173
3174 if (CurDAG->isBaseWithConstantOffset(Addr)) {
3178 if (Base.getOpcode() == RISCVISD::ADD_LO) {
3181
3182
3183
3184
3185
3188 GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
3189 if ((CVal == 0 || Alignment > CVal) &&
3191 int64_t CombinedOffset = CVal + GA->getOffset();
3195 CombinedOffset, GA->getTargetFlags());
3196 return true;
3197 }
3198 }
3199 }
3200
3202 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
3203 Offset = CurDAG->getSignedTargetConstant(CVal, DL, VT);
3204 return true;
3205 }
3206 }
3207
3208
3211 assert(!isInt<12>(CVal) && "simm12 not already handled?");
3212
3213
3214
3215
3216 if (CVal >= -4096 && CVal <= 4094) {
3217 int64_t Adj = CVal < 0 ? -2048 : 2047;
3220 CurDAG->getSignedTargetConstant(Adj, DL, VT)),
3221 0);
3222 Offset = CurDAG->getSignedTargetConstant(CVal - Adj, DL, VT);
3223 return true;
3224 }
3225
3226
3227
3228
3229
3230
3231
3234 Offset, false)) {
3235
3238 0);
3239 return true;
3240 }
3241 }
3242
3244 false))
3245 return true;
3246
3247 Base = Addr;
3249 return true;
3250}
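// Illustrative sketch, not part of the original file: the offset split used
// by SelectAddrRegImm above for offsets just outside the simm12 range. Both
// halves fit a 12-bit signed immediate, so the address becomes
// "addi tmp, base, Adj" plus a load/store offset of CVal - Adj.
#include <cassert>
#include <cstdint>

static bool isSImm12(int64_t V) { return V >= -2048 && V <= 2047; }

static void splitOutOfRangeOffset(int64_t CVal, int64_t &Adj, int64_t &Rem) {
  assert(CVal >= -4096 && CVal <= 4094 && !isSImm12(CVal));
  Adj = CVal < 0 ? -2048 : 2047;  // folded into an ADDI on the base
  Rem = CVal - Adj;               // remaining load/store offset
  assert(isSImm12(Adj) && isSImm12(Rem) && Adj + Rem == CVal);
}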
3251
3252
3256 return true;
3257
3260
3261 if (CurDAG->isBaseWithConstantOffset(Addr)) {
3265
3267 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
3268 Offset = CurDAG->getSignedTargetConstant(CVal, DL, VT);
3269 return true;
3270 }
3271 }
3272
3273 Base = Addr;
3275 return true;
3276}
3277
3278
3279
3283 return true;
3284
3287
3288 if (CurDAG->isBaseWithConstantOffset(Addr)) {
3292
3293
3294 if ((CVal & 0b11111) != 0) {
3295 Base = Addr;
3297 return true;
3298 }
3299
3301 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
3302 Offset = CurDAG->getSignedTargetConstant(CVal, DL, VT);
3303 return true;
3304 }
3305 }
3306
3307
3310 assert(!isInt<12>(CVal) && "simm12 not already handled?");
3311
3312
3313
3314 if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
3315 int64_t Adj = CVal < 0 ? -2048 : 2016;
3316 int64_t AdjustedOffset = CVal - Adj;
3320 CurDAG->getSignedTargetConstant(AdjustedOffset, DL, VT)),
3321 0);
3322 Offset = CurDAG->getSignedTargetConstant(Adj, DL, VT);
3323 return true;
3324 }
3325
3327 Offset, true)) {
3328
3331 0);
3332 return true;
3333 }
3334 }
3335
3337 true))
3338 return true;
3339
3340 Base = Addr;
3342 return true;
3343}
3344
3345
3348 if (User->getOpcode() != ISD::LOAD && User->getOpcode() != ISD::STORE)
3349 return false;
3352 (Subtarget.hasVendorXTHeadMemIdx() || Subtarget.hasVendorXqcisls())) &&
3353 !((VT == MVT::f32 || VT == MVT::f64) &&
3354 Subtarget.hasVendorXTHeadFMemIdx()))
3355 return false;
3356
3357 if (User->getOpcode() == ISD::STORE &&
3359 return false;
3360
3361 return true;
3362}
3363
3364
3365
3366
3367
3368
3372 bool FoundADDI = false;
3373 for (auto *User : Add->users()) {
3375 continue;
3376
3377
3378 if (!Shift || FoundADDI || User->getOpcode() != ISD::ADD ||
3381 return false;
3382
3383 FoundADDI = true;
3384
3385
3387 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
3388 if (Subtarget.hasShlAdd(ShiftAmt))
3389 return false;
3390
3391
3392 for (auto *ADDIUser : User->users())
3394 return false;
3395 }
3396
3397 return true;
3398}
3399
3401 unsigned MaxShiftAmount,
3405 return false;
3408
3410 auto SelectShl = [this, VT, MaxShiftAmount](SDValue N, SDValue &Index,
3413 return false;
3414
3415
3416 unsigned ShiftAmt = N.getConstantOperandVal(1);
3417 if (ShiftAmt > MaxShiftAmount)
3418 return false;
3419
3420 Index = N.getOperand(0);
3421 Shift = CurDAG->getTargetConstant(ShiftAmt, SDLoc(N), VT);
3422 return true;
3423 };
3424
3426
3427 if (LHS.getOpcode() == ISD::ADD &&
3429 isInt<12>(C1->getSExtValue())) {
3430 if (SelectShl(LHS.getOperand(1), Index, Scale) &&
3432 SDValue C1Val = CurDAG->getTargetConstant(*C1->getConstantIntValue(),
3433 SDLoc(Addr), VT);
3435 LHS.getOperand(0), C1Val),
3436 0);
3437 return true;
3438 }
3439
3440
3441 if (SelectShl(LHS.getOperand(0), Index, Scale) &&
3443 SDValue C1Val = CurDAG->getTargetConstant(*C1->getConstantIntValue(),
3444 SDLoc(Addr), VT);
3446 LHS.getOperand(1), C1Val),
3447 0);
3448 return true;
3449 }
3450 }
3451
3452
3453
3454
3455 return false;
3456 }
3457
3458
3459 if (SelectShl(RHS, Index, Scale)) {
3461 return false;
3463 return true;
3464 }
3465
3466
3467 if (SelectShl(LHS, Index, Scale)) {
3469 return false;
3471 return true;
3472 }
3473
3475 return false;
3476
3478 Index = RHS;
3479 Scale = CurDAG->getTargetConstant(0, SDLoc(Addr), VT);
3480 return true;
3481}
3482
3484 unsigned MaxShiftAmount,
3489 return false;
3490
3491 if (Index.getOpcode() == ISD::AND) {
3494 Index = Index.getOperand(0);
3495 return true;
3496 }
3497 }
3498
3499 return false;
3500}
3501
3505 return false;
3506
3508 return false;
3509
3512 return true;
3513}
3514
3517 ShAmt = N;
3518
3519
3522
3523
3524
3525
3529
3530
3531
3534
3537 } else {
3538
3539
3542 return true;
3544 }
3545 }
3546
3550
3551
3552 if (Imm != 0 && Imm % ShiftWidth == 0) {
3554 return true;
3555 }
3559
3560
3561 if (Imm != 0 && Imm % ShiftWidth == 0) {
3564 SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
3565 unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
3568 ShAmt = SDValue(Neg, 0);
3569 return true;
3570 }
3571
3572
3573 if (Imm % ShiftWidth == ShiftWidth - 1) {
3578 CurDAG->getAllOnesConstant(DL, VT, true));
3579 ShAmt = SDValue(Not, 0);
3580 return true;
3581 }
3582 }
3583
3584 return true;
3585}
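// Illustrative sketch, not part of the original file: the modular identities
// selectShiftMask above relies on. An RV64 variable shift only reads the low
// six bits of its amount, so masks and offsets that are congruent mod 64 can
// be dropped, or simplified to a neg/not of the amount.
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t Amt = 0; Amt < 256; ++Amt) {
    // An AND with 63 does not change the effective shift amount.
    assert((Amt & 63) % 64 == Amt % 64);
    // (64 - Amt) is congruent to -Amt, so "sub from 64" becomes a negate.
    assert((64u - Amt) % 64 == (0u - Amt) % 64);
    // If Imm % 64 == 63, (Imm - Amt) is congruent to ~Amt ("not").
    assert((63u - Amt) % 64 == (~Amt) % 64);
  }
  return 0;
}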
3586
3587
3588
3589
3590
3591
3595 "Unexpected condition code!");
3596
3597
3599 return false;
3600
3601
3603 if (CCVal != ExpectedCCVal)
3604 return false;
3605
3606 SDValue LHS = N->getOperand(0);
3607 SDValue RHS = N->getOperand(1);
3608
3609 if (!LHS.getValueType().isScalarInteger())
3610 return false;
3611
3612
3614 Val = LHS;
3615 return true;
3616 }
3617
3619
3621 int64_t CVal = C->getSExtValue();
3622
3623
3624 if (CVal == -2048) {
3626 CurDAG->getMachineNode(
3627 RISCV::XORI, DL, N->getValueType(0), LHS,
3628 CurDAG->getSignedTargetConstant(CVal, DL, N->getValueType(0))),
3629 0);
3630 return true;
3631 }
3632
3633
3634 if (isInt<12>(CVal) || CVal == 2048) {
3635 unsigned Opc = RISCV::ADDI;
3636 if (LHS.getOpcode() == ISD::SIGN_EXTEND_INREG &&
3637 cast<VTSDNode>(LHS.getOperand(1))->getVT() == MVT::i32) {
3638 Opc = RISCV::ADDIW;
3639 LHS = LHS.getOperand(0);
3640 }
3641
3643 CurDAG->getSignedTargetConstant(
3644 -CVal, DL, N->getValueType(0))),
3645 0);
3646 return true;
3647 }
3648 if (isPowerOf2_64(CVal) && Subtarget->hasStdExtZbs()) {
3650 CurDAG->getMachineNode(
3651 RISCV::BINVI, DL, N->getValueType(0), LHS,
3652 CurDAG->getTargetConstant(Log2_64(CVal), DL, N->getValueType(0))),
3653 0);
3654 return true;
3655 }
3656
3657
3658
3659 if (Subtarget->hasVendorXqcilia() && isInt<26>(CVal) &&
3660 (CVal & 0xFFF) != 0) {
3662 CurDAG->getMachineNode(
3663 RISCV::QC_E_ADDI, DL, N->getValueType(0), LHS,
3664 CurDAG->getSignedTargetConstant(-CVal, DL, N->getValueType(0))),
3665 0);
3666 return true;
3667 }
3668 }
3669
3670
3671
3673 CurDAG->getMachineNode(RISCV::XOR, DL, N->getValueType(0), LHS, RHS), 0);
3674 return true;
3675}
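// Illustrative sketch, not part of the original file: the integer identities
// selectSETCC above uses to turn an equality test against a constant into a
// compare of a single value against zero (via xor/xori, an addi of -C, or a
// binvi when C is a power of two, since binvi flips that single bit).
#include <cassert>
#include <cstdint>

static void checkEqRewrites(uint64_t X, uint64_t C) {
  assert(((X ^ C) == 0) == (X == C));       // xor/xori then test for zero
  assert(((X + (0 - C)) == 0) == (X == C)); // addi of -C then test for zero
}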
3676
3679 cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
3680 Val = N.getOperand(0);
3681 return true;
3682 }
3683
3684 auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
3686 return N;
3687
3688 SDValue N0 = N.getOperand(0);
3690 N.getConstantOperandVal(1) == ShiftAmt &&
3693
3694 return N;
3695 };
3696
3697 MVT VT = N.getSimpleValueType();
3700 return true;
3701 }
3702
3703 return false;
3704}
3705
3707 if (N.getOpcode() == ISD::AND) {
3710 Val = N.getOperand(0);
3711 return true;
3712 }
3713 }
3714 MVT VT = N.getSimpleValueType();
3716 if (CurDAG->MaskedValueIsZero(N, Mask)) {
3717 Val = N;
3718 return true;
3719 }
3720
3721 return false;
3722}
3723
3724
3725
3726
3730 SDValue N0 = N.getOperand(0);
3731
3735 uint64_t Mask = N.getConstantOperandVal(1);
3737
3738 unsigned XLen = Subtarget->getXLen();
3739 if (LeftShift)
3741 else
3743
3747 if (Trailing != ShAmt)
3748 return false;
3749
3750 unsigned Opcode;
3751
3752
3753
3754 if (LeftShift && Leading == 0 && C2 < Trailing)
3755 Opcode = RISCV::SRLI;
3756
3757
3758
3759 else if (LeftShift && Leading == 32 - C2 && C2 < Trailing)
3760 Opcode = RISCV::SRLIW;
3761
3762
3763
3764 else if (!LeftShift && Leading == C2)
3765 Opcode = RISCV::SRLI;
3766
3767
3768
3769 else if (!LeftShift && Leading == 32 + C2)
3770 Opcode = RISCV::SRLIW;
3771 else
3772 return false;
3773
3775 EVT VT = N.getValueType();
3776 ShAmt = LeftShift ? Trailing - C2 : Trailing + C2;
3779 CurDAG->getTargetConstant(ShAmt, DL, VT)),
3780 0);
3781 return true;
3782 }
3785 uint64_t Mask = N.getConstantOperandVal(1);
3787
3788
3789
3790
3791
3793 unsigned XLen = Subtarget->getXLen();
3796 if (C2 > Leading && Leading > 0 && Trailing == ShAmt) {
3798 EVT VT = N.getValueType();
3801 CurDAG->getTargetConstant(C2 - Leading, DL, VT)),
3802 0);
3804 RISCV::SRLI, DL, VT, Val,
3805 CurDAG->getTargetConstant(Leading + ShAmt, DL, VT)),
3806 0);
3807 return true;
3808 }
3809 }
3810 }
3811 } else if (bool LeftShift = N.getOpcode() == ISD::SHL;
3812 (LeftShift || N.getOpcode() == ISD::SRL) &&
3814 SDValue N0 = N.getOperand(0);
3819 unsigned C1 = N.getConstantOperandVal(1);
3820 unsigned XLen = Subtarget->getXLen();
3823
3824
3825 if (LeftShift && Leading == 32 && Trailing > 0 &&
3826 (Trailing + C1) == ShAmt) {
3828 EVT VT = N.getValueType();
3831 CurDAG->getTargetConstant(Trailing, DL, VT)),
3832 0);
3833 return true;
3834 }
3835
3836
3837 if (!LeftShift && Leading == 32 && Trailing > C1 &&
3838 (Trailing - C1) == ShAmt) {
3840 EVT VT = N.getValueType();
3843 CurDAG->getTargetConstant(Trailing, DL, VT)),
3844 0);
3845 return true;
3846 }
3847 }
3848 }
3849 }
3850
3851 return false;
3852}
3853
3854
3855
3856
3860 N.hasOneUse()) {
3861 SDValue N0 = N.getOperand(0);
3864 uint64_t Mask = N.getConstantOperandVal(1);
3866
3868
3869
3870
3871
3875 if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
3877 EVT VT = N.getValueType();
3880 CurDAG->getTargetConstant(C2 - ShAmt, DL, VT)),
3881 0);
3882 return true;
3883 }
3884 }
3885 }
3886 }
3887
3888 return false;
3889}
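// Illustrative sketch, not part of the original file: the computation done by
// the Zba shNadd/shNadd.uw instructions that selectSHXADDOp and
// selectSHXADD_UWOp above feed. shNadd adds a shifted index to a base, and
// the .uw form zero-extends the index from 32 bits first.
#include <cstdint>

static uint64_t shNadd(uint64_t Rs1, uint64_t Rs2, unsigned N) {
  return Rs2 + (Rs1 << N);                    // N is 1, 2 or 3
}

static uint64_t shNadd_uw(uint64_t Rs1, uint64_t Rs2, unsigned N) {
  return Rs2 + ((Rs1 & 0xffffffffULL) << N);  // zero-extend the index, then shift
}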
3890
3892 assert(N->getOpcode() == ISD::OR || N->getOpcode() == RISCVISD::OR_VL);
3893 if (N->getFlags().hasDisjoint())
3894 return true;
3895 return CurDAG->haveNoCommonBitsSet(N->getOperand(0), N->getOperand(1));
3896}
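// Illustrative sketch, not part of the original file: the property checked by
// orDisjoint above. When two values have no set bits in common, OR and ADD
// produce the same result, so an add-based instruction can implement the OR.
#include <cassert>
#include <cstdint>

static void checkDisjointOrIsAdd(uint64_t A, uint64_t B) {
  if ((A & B) == 0)
    assert((A | B) == A + B); // no carries can occur, so OR == ADD
}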
3897
3898 bool RISCVDAGToDAGISel::selectImm64IfCheaper(int64_t Imm, int64_t OrigImm,
3901 true);
3903 true);
3904 if (OrigCost <= Cost)
3905 return false;
3906
3908 return true;
3909}
3910
3913 return false;
3915 if ((Imm >> 31) != 1)
3916 return false;
3917
3918 for (const SDNode *U : N->users()) {
3919 switch (U->getOpcode()) {
3921 break;
3924 break;
3925 return false;
3926 default:
3927 return false;
3928 }
3929 }
3930
3931 return selectImm64IfCheaper(0xffffffff00000000 | Imm, Imm, N, Val);
3932}
3933
3936 return false;
3939 return false;
3940
3941 for (const SDNode *U : N->users()) {
3942 switch (U->getOpcode()) {
3944 break;
3945 case RISCVISD::VMV_V_X_VL:
3946 if (!all_of(U->users(), [](const SDNode *V) {
3947 return V->getOpcode() == ISD::ADD ||
3948 V->getOpcode() == RISCVISD::ADD_VL;
3949 }))
3950 return false;
3951 break;
3952 default:
3953 return false;
3954 }
3955 }
3956
3957 return selectImm64IfCheaper(-Imm, Imm, N, Val);
3958}
3959
3962 return false;
3964
3965
3966 if (isInt<32>(Imm) && ((Imm & 0xfff) != 0xfff || Imm == -1))
3967 return false;
3968
3969
3970 for (const SDNode *U : N->users()) {
3971 switch (U->getOpcode()) {
3975 if (!(Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbkb()))
3976 return false;
3977 break;
3978 case RISCVISD::VMV_V_X_VL:
3979 if (!Subtarget->hasStdExtZvkb())
3980 return false;
3981 if (!all_of(U->users(), [](const SDNode *V) {
3982 return V->getOpcode() == ISD::AND ||
3983 V->getOpcode() == RISCVISD::AND_VL;
3984 }))
3985 return false;
3986 break;
3987 default:
3988 return false;
3989 }
3990 }
3991
3993 Val =
3995 return true;
3996 }
3997
3998
3999
4000 return selectImm64IfCheaper(~Imm, Imm, N, Val);
4001}
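// Illustrative sketch, not part of the original file: why selectInvLogicImm
// above may materialize ~Imm instead of Imm. With the Zbb/Zbkb andn/orn/xnor
// instructions the inverted constant produces the same result, and ~Imm is
// sometimes cheaper to build than Imm itself.
#include <cassert>
#include <cstdint>

static uint64_t andn(uint64_t Rs1, uint64_t Rs2) { return Rs1 & ~Rs2; }
static uint64_t orn(uint64_t Rs1, uint64_t Rs2) { return Rs1 | ~Rs2; }

static void checkInvertedConstant(uint64_t X, uint64_t Imm) {
  uint64_t NotImm = ~Imm;                 // the value actually materialized
  assert((X & Imm) == andn(X, NotImm));
  assert((X | Imm) == orn(X, NotImm));
  assert((X ^ Imm) == ~(X ^ NotImm));     // xnor with the inverted constant
}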
4002
4004 unsigned Bits,
4007
4008 if (!MCOpcode)
4009 return false;
4010
4014 return false;
4016
4018 bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other;
4020 unsigned VLIdx = User->getNumOperands() - HasVecPolicyOp - HasChainOp - 2;
4021 const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);
4022
4023 if (UserOpNo == VLIdx)
4024 return false;
4025
4026 auto NumDemandedBits =
4028 return NumDemandedBits && Bits >= *NumDemandedBits;
4029}
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4041 const unsigned Depth) const {
4048 "Unexpected opcode");
4049
4051 return false;
4052
4053
4054
4055 if (Depth == 0 && !Node->getValueType(0).isScalarInteger())
4056 return false;
4057
4060
4061 if (!User->isMachineOpcode())
4062 return false;
4063
4064
4065 switch (User->getMachineOpcode()) {
4066 default:
4068 break;
4069 return false;
4070 case RISCV::ADDW:
4071 case RISCV::ADDIW:
4072 case RISCV::SUBW:
4073 case RISCV::MULW:
4074 case RISCV::SLLW:
4075 case RISCV::SLLIW:
4076 case RISCV::SRAW:
4077 case RISCV::SRAIW:
4078 case RISCV::SRLW:
4079 case RISCV::SRLIW:
4080 case RISCV::DIVW:
4081 case RISCV::DIVUW:
4082 case RISCV::REMW:
4083 case RISCV::REMUW:
4084 case RISCV::ROLW:
4085 case RISCV::RORW:
4086 case RISCV::RORIW:
4087 case RISCV::CLZW:
4088 case RISCV::CTZW:
4089 case RISCV::CPOPW:
4090 case RISCV::SLLI_UW:
4091 case RISCV::ABSW:
4092 case RISCV::FMV_W_X:
4093 case RISCV::FCVT_H_W:
4094 case RISCV::FCVT_H_W_INX:
4095 case RISCV::FCVT_H_WU:
4096 case RISCV::FCVT_H_WU_INX:
4097 case RISCV::FCVT_S_W:
4098 case RISCV::FCVT_S_W_INX:
4099 case RISCV::FCVT_S_WU:
4100 case RISCV::FCVT_S_WU_INX:
4101 case RISCV::FCVT_D_W:
4102 case RISCV::FCVT_D_W_INX:
4103 case RISCV::FCVT_D_WU:
4104 case RISCV::FCVT_D_WU_INX:
4105 case RISCV::TH_REVW:
4106 case RISCV::TH_SRRIW:
4107 if (Bits >= 32)
4108 break;
4109 return false;
4110 case RISCV::SLL:
4111 case RISCV::SRA:
4112 case RISCV::SRL:
4113 case RISCV::ROL:
4114 case RISCV::ROR:
4115 case RISCV::BSET:
4116 case RISCV::BCLR:
4117 case RISCV::BINV:
4118
4120 break;
4121 return false;
4122 case RISCV::SLLI:
4123
4124 if (Bits >= Subtarget->getXLen() - User->getConstantOperandVal(1))
4125 break;
4126 return false;
4127 case RISCV::ANDI:
4129 break;
4130 goto RecCheck;
4131 case RISCV::ORI: {
4134 break;
4135 [[fallthrough]];
4136 }
4137 case RISCV::AND:
4138 case RISCV::OR:
4139 case RISCV::XOR:
4140 case RISCV::XORI:
4141 case RISCV::ANDN:
4142 case RISCV::ORN:
4143 case RISCV::XNOR:
4144 case RISCV::SH1ADD:
4145 case RISCV::SH2ADD:
4146 case RISCV::SH3ADD:
4147 RecCheck:
4149 break;
4150 return false;
4151 case RISCV::SRLI: {
4152 unsigned ShAmt = User->getConstantOperandVal(1);
4153
4154
4155
4157 break;
4158 return false;
4159 }
4160 case RISCV::SEXT_B:
4161 case RISCV::PACKH:
4162 if (Bits >= 8)
4163 break;
4164 return false;
4165 case RISCV::SEXT_H:
4166 case RISCV::FMV_H_X:
4167 case RISCV::ZEXT_H_RV32:
4168 case RISCV::ZEXT_H_RV64:
4169 case RISCV::PACKW:
4170 if (Bits >= 16)
4171 break;
4172 return false;
4173 case RISCV::PACK:
4174 if (Bits >= (Subtarget->getXLen() / 2))
4175 break;
4176 return false;
4177 case RISCV::ADD_UW:
4178 case RISCV::SH1ADD_UW:
4179 case RISCV::SH2ADD_UW:
4180 case RISCV::SH3ADD_UW:
4181
4182
4184 break;
4185 return false;
4186 case RISCV::SB:
4188 break;
4189 return false;
4190 case RISCV::SH:
4192 break;
4193 return false;
4194 case RISCV::SW:
4196 break;
4197 return false;
4198 case RISCV::TH_EXT:
4199 case RISCV::TH_EXTU: {
4200 unsigned Msb = User->getConstantOperandVal(1);
4201 unsigned Lsb = User->getConstantOperandVal(2);
4202
4203 if (Msb >= Lsb && Bits > Msb)
4204 break;
4205 return false;
4206 }
4207 }
4208 }
4209
4210 return true;
4211}
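// Illustrative sketch, not part of the original file: the property that
// hasAllNBitUsers above exploits for the *W opcodes. An instruction such as
// ADDW only reads the low 32 bits of its sources, so anything in an operand's
// upper bits cannot change the result.
#include <cassert>
#include <cstdint>

static int64_t addw(uint64_t Rs1, uint64_t Rs2) {
  return (int32_t)(uint32_t)(Rs1 + Rs2); // sign-extend the low 32 bits of the sum
}

static void checkOnlyLow32BitsMatter(uint64_t A, uint64_t B, uint64_t Junk) {
  assert(addw(A, B) == addw(A ^ (Junk << 32), B)); // upper bits are ignored
}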
4212
4213
4217 if (!C)
4218 return false;
4219
4220 int64_t Offset = C->getSExtValue();
4221 for (unsigned Shift = 0; Shift < 4; Shift++) {
4223 EVT VT = N->getValueType(0);
4224 Simm5 = CurDAG->getSignedTargetConstant(Offset >> Shift, SDLoc(N), VT);
4225 Shl2 = CurDAG->getTargetConstant(Shift, SDLoc(N), VT);
4226 return true;
4227 }
4228 }
4229
4230 return false;
4231}
4232
4233
4234
4237 if (C && isUInt<5>(C->getZExtValue())) {
4238 VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
4239 N->getValueType(0));
4240 } else if (C && C->isAllOnes()) {
4241
4243 N->getValueType(0));
4246
4247
4248
4249
4251 N->getValueType(0));
4252 } else {
4253 VL = N;
4254 }
4255
4256 return true;
4257}
4258
4261 if (!N.getOperand(0).isUndef())
4264 }
4266 if ((Splat.getOpcode() != RISCVISD::VMV_V_X_VL &&
4267 Splat.getOpcode() != RISCVISD::VMV_S_X_VL) ||
4268 !Splat.getOperand(0).isUndef())
4270 assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");
4272}
4273
4277 return false;
4278
4279 SplatVal = Splat.getOperand(1);
4280 return true;
4281}
4282
4286 std::function<bool(int64_t)> ValidateImm,
4287 bool Decrement = false) {
4290 return false;
4291
4292 const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();
4294 "Unexpected splat operand type");
4295
4296
4297
4298
4299
4300
4301
4302
4303 APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);
4304
4305 int64_t SplatImm = SplatConst.getSExtValue();
4306
4307 if (!ValidateImm(SplatImm))
4308 return false;
4309
4310 if (Decrement)
4311 SplatImm -= 1;
4312
4313 SplatVal =
4315 return true;
4316}
4317
4320 [](int64_t Imm) { return isInt<5>(Imm); });
4321}
4322
4325 N, SplatVal, *CurDAG, *Subtarget,
4326 [](int64_t Imm) { return Imm >= -15 && Imm <= 16; },
4327 true);
4328}
4329
4332 N, SplatVal, *CurDAG, *Subtarget,
4333 [](int64_t Imm) { return Imm >= -15 && Imm <= 16; },
4334 false);
4335}
4336
4340 N, SplatVal, *CurDAG, *Subtarget,
4341 [](int64_t Imm) { return Imm != 0 && Imm >= -15 && Imm <= 16; },
4342 true);
4343}
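// Illustrative sketch, not part of the original file: the rewrite behind the
// "simm5 plus 1" splat selectors above. A comparison against a splatted
// constant Imm in [-15, 16] can be re-expressed against Imm - 1, which then
// fits the 5-bit signed immediate field of the .vi compare forms.
#include <cassert>
#include <cstdint>

static void checkPlusOneRewrite(int64_t X, int64_t Imm) {
  assert(Imm >= -15 && Imm <= 16);
  assert((X < Imm) == (X <= Imm - 1));  // e.g. a vmslt against Imm -> vmsle.vi with Imm-1
  assert((X >= Imm) == (X > Imm - 1));  // e.g. a vmsge against Imm -> vmsgt.vi with Imm-1
}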
4344
4348 N, SplatVal, *CurDAG, *Subtarget,
4349 [Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
4350}
4351
4356
4358 auto IsExtOrTrunc = [](SDValue N) {
4359 switch (N->getOpcode()) {
4362
4363
4364 case RISCVISD::TRUNCATE_VECTOR_VL:
4365 case RISCVISD::VSEXT_VL:
4366 case RISCVISD::VZEXT_VL:
4367 return true;
4368 default:
4369 return false;
4370 }
4371 };
4372
4373
4374 while (IsExtOrTrunc(N)) {
4375 if (!N.hasOneUse() || N.getScalarValueSizeInBits() < 8)
4376 return false;
4378 }
4379
4381}
4382
4384
4385 if (N.getOpcode() == ISD::BITCAST &&
4386 N.getOperand(0).getValueType() == Subtarget->getXLenVT()) {
4387 Imm = N.getOperand(0);
4388 return true;
4389 }
4390
4391 if (N.getOpcode() == RISCVISD::FMV_H_X ||
4392 N.getOpcode() == RISCVISD::FMV_W_X_RV64) {
4393 Imm = N.getOperand(0);
4394 return true;
4395 }
4396
4397
4399 if (!CFP)
4400 return false;
4402
4404 return false;
4405
4407
4408 MVT XLenVT = Subtarget->getXLenVT();
4409 if (VT == MVT::f64 && !Subtarget->is64Bit()) {
4411 return false;
4412 }
4415 *Subtarget);
4416 return true;
4417}
4418
4422 int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
4423
4425 return false;
4426
4427 Imm = CurDAG->getSignedTargetConstant(ImmVal, SDLoc(N),
4428 Subtarget->getXLenVT());
4429 return true;
4430 }
4431
4432 return false;
4433}
4434
4435
4436
4437 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
4438
4439 if (N->getMachineOpcode() != RISCV::ADDIW ||
4441 return false;
4442
4443 SDValue N0 = N->getOperand(0);
4445 return false;
4446
4448 default:
4449 break;
4450 case RISCV::ADD:
4451 case RISCV::ADDI:
4452 case RISCV::SUB:
4453 case RISCV::MUL:
4454 case RISCV::SLLI: {
4455
4456
4457 unsigned Opc;
4459 default:
4461 case RISCV::ADD: Opc = RISCV::ADDW; break;
4462 case RISCV::ADDI: Opc = RISCV::ADDIW; break;
4463 case RISCV::SUB: Opc = RISCV::SUBW; break;
4464 case RISCV::MUL: Opc = RISCV::MULW; break;
4465 case RISCV::SLLI: Opc = RISCV::SLLIW; break;
4466 }
4467
4470
4471
4474 break;
4475
4478 N00, N01);
4480 return true;
4481 }
4482 case RISCV::ADDW:
4483 case RISCV::ADDIW:
4484 case RISCV::SUBW:
4485 case RISCV::MULW:
4486 case RISCV::SLLIW:
4487 case RISCV::PACKW:
4488 case RISCV::TH_MULAW:
4489 case RISCV::TH_MULAH:
4490 case RISCV::TH_MULSW:
4491 case RISCV::TH_MULSH:
4493 break;
4494
4495
4496
4498 return true;
4499 }
4500
4501 return false;
4502}
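// Illustrative sketch, not part of the original file: the identity behind
// doPeepholeSExtW above. "addiw rd, rs, 0" is just a sign-extension of the
// low 32 bits (sext.w), and sign-extending the result of an ADD is the same
// as computing the ADDW directly, so the pair can collapse to one W op.
#include <cassert>
#include <cstdint>

static int64_t sextW(uint64_t X) { return (int32_t)(uint32_t)X; }

static int64_t addwOf(uint64_t A, uint64_t B) { return (int32_t)(uint32_t)(A + B); }

static void checkSExtWOfAdd(uint64_t A, uint64_t B) {
  assert(sextW(A + B) == addwOf(A, B));
}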
4503
4505 const auto IsVMSet = [](unsigned Opc) {
4506 return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
4507 Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
4508 Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
4509 Opc == RISCV::PseudoVMSET_M_B8;
4510 };
4511
4512
4513
4514
4516}
4517
4519 if (!V.isMachineOpcode())
4520 return false;
4521 if (V.getMachineOpcode() == TargetOpcode::REG_SEQUENCE) {
4522 for (unsigned I = 1; I < V.getNumOperands(); I += 2)
4524 return false;
4525 return true;
4526 }
4527 return V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
4528}
4529
4530
4531
4532 bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) {
4533 const RISCV::RISCVMaskedPseudoInfo *I =
4534 RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
4535 if (!I)
4536 return false;
4537
4538 unsigned MaskOpIdx = I->MaskOpIdx;
4540 return false;
4541
4542
4543
4544 const unsigned Opc = I->UnmaskedPseudo;
4545 const MCInstrDesc &MCID = TII->get(Opc);
4547
4548 const MCInstrDesc &MaskedMCID = TII->get(N->getMachineOpcode());
4550
4553 "Unmasked pseudo has policy but masked pseudo doesn't?");
4555 "Unexpected pseudo structure");
4556 assert(!(HasPassthru && !MaskedHasPassthru) &&
4557 "Unmasked pseudo has passthru but masked pseudo doesn't?");
4558
4560
4561 bool ShouldSkip = !HasPassthru && MaskedHasPassthru;
4564 bool HasChainOp =
4565 N->getOperand(N->getNumOperands() - 1).getValueType() == MVT::Other;
4566 unsigned LastOpNum = N->getNumOperands() - 1 - HasChainOp;
4567 for (unsigned I = ShouldSkip, E = N->getNumOperands(); I != E; I++) {
4568
4570 if (I == MaskOpIdx)
4571 continue;
4572 if (DropPolicy && I == LastOpNum)
4573 continue;
4575 }
4576
4577 MachineSDNode *Result =
4578 CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
4579
4580 if (!N->memoperands_empty())
4581 CurDAG->setNodeMemRefs(Result, N->memoperands());
4582
4583 Result->setFlags(N->getFlags());
4585
4586 return true;
4587}
4588
4589
4590
4591
4592
4593
4594 bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
4595 bool MadeChange = false;
4597
4598 while (Position != CurDAG->allnodes_begin()) {
4599 SDNode *N = &*--Position;
4600 if (N->use_empty() || !N->isMachineOpcode())
4601 continue;
4602
4603 const unsigned Opc = N->getMachineOpcode();
4604 if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||
4607 continue;
4608
4610 Ops.push_back(CurDAG->getRegister(RISCV::NoRegister, N->getValueType(0)));
4611 for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
4614 }
4615
4616 MachineSDNode *Result =
4617 CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
4618 Result->setFlags(N->getFlags());
4621 MadeChange = true;
4622 }
4623 return MadeChange;
4624}
4625
4626
4627
4628
4633
4635
4640
static bool usesAllOnesMask(SDValue MaskOp)
Definition RISCVISelDAGToDAG.cpp:4504
static Register getTileReg(uint64_t TileNum)
Definition RISCVISelDAGToDAG.cpp:890
static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
Definition RISCVISelDAGToDAG.cpp:210
static bool isRegRegScaleLoadOrStore(SDNode *User, SDValue Add, const RISCVSubtarget &Subtarget)
Return true if this a load/store that we have a RegRegScale instruction for.
Definition RISCVISelDAGToDAG.cpp:3346
#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix)
static bool isWorthFoldingAdd(SDValue Add)
Definition RISCVISelDAGToDAG.cpp:3060
static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, RISCVMatInt::InstSeq &Seq)
Definition RISCVISelDAGToDAG.cpp:181
static bool isImplicitDef(SDValue V)
Definition RISCVISelDAGToDAG.cpp:4518
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
static bool selectVSplatImmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, std::function< bool(int64_t)> ValidateImm, bool Decrement=false)
Definition RISCVISelDAGToDAG.cpp:4283
static unsigned getSegInstNF(unsigned Intrinsic)
Definition RISCVISelDAGToDAG.cpp:954
static bool isWorthFoldingIntoRegRegScale(const RISCVSubtarget &Subtarget, SDValue Add, SDValue Shift=SDValue())
Is it profitable to fold this Add into RegRegScale load/store.
Definition RISCVISelDAGToDAG.cpp:3369
static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, unsigned Bits, const TargetInstrInfo *TII)
Definition RISCVISelDAGToDAG.cpp:4003
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, const RISCVSubtarget *Subtarget, SDValue Addr, SDValue &Base, SDValue &Offset, bool IsPrefetch=false)
Definition RISCVISelDAGToDAG.cpp:3008
#define INST_ALL_NF_CASE_WITH_FF(NAME)
#define CASE_VMSLT_OPCODES(lmulenum, suffix)
bool isRegImmLoadOrStore(SDNode *User, SDValue Add)
Definition RISCVISelDAGToDAG.cpp:3089
static cl::opt< bool > UsePseudoMovImm("riscv-use-rematerializable-movimm", cl::Hidden, cl::desc("Use a rematerializable pseudoinstruction for 2 instruction " "constant materialization"), cl::init(false))
static SDValue findVSplat(SDValue N)
Definition RISCVISelDAGToDAG.cpp:4259
#define INST_ALL_NF_CASE(NAME)
static bool isApplicableToPLI(int Val)
Definition RISCVISelDAGToDAG.cpp:996
Contains matchers for matching SelectionDAG nodes and values.
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
int64_t getSExtValue() const
A parsed version of the target data layout string in and methods for querying it.
FunctionPass class - This class is used to implement most global optimizations.
This class is used to form a handle around another node that is persistent and is updated across invo...
const SDValue & getValue() const
static StringRef getMemConstraintName(ConstraintCode C)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Describe properties that are true of each instruction in the target description file.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
MVT getVectorElementType() const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
void setFlags(Flags f)
Bitwise OR the current flags with the given flags.
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
RISCVDAGToDAGISelLegacy(RISCVTargetMachine &TargetMachine, CodeGenOptLevel OptLevel)
Definition RISCVISelDAGToDAG.cpp:4636
bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val)
RISC-V doesn't have general instructions for integer setne/seteq, but we can check for equality with ...
Definition RISCVISelDAGToDAG.cpp:3592
bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val)
Definition RISCVISelDAGToDAG.cpp:3677
bool selectNegImm(SDValue N, SDValue &Val)
Definition RISCVISelDAGToDAG.cpp:3934
bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val)
Definition RISCVISelDAGToDAG.cpp:3706
bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD_UW.
Definition RISCVISelDAGToDAG.cpp:3857
bool areOffsetsWithinAlignment(SDValue Addr, Align Alignment)
Definition RISCVISelDAGToDAG.cpp:3120
bool hasAllNBitUsers(SDNode *Node, unsigned Bits, const unsigned Depth=0) const
Definition RISCVISelDAGToDAG.cpp:4040
bool SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base, SDValue &Offset)
Similar to SelectAddrRegImm, except that the least significant 5 bits of Offset should be all zeros.
Definition RISCVISelDAGToDAG.cpp:3280
bool selectZExtImm32(SDValue N, SDValue &Val)
Definition RISCVISelDAGToDAG.cpp:3911
bool SelectAddrRegZextRegScale(SDValue Addr, unsigned MaxShiftAmount, unsigned Bits, SDValue &Base, SDValue &Index, SDValue &Scale)
Definition RISCVISelDAGToDAG.cpp:3483
bool SelectAddrRegReg(SDValue Addr, SDValue &Base, SDValue &Offset)
Definition RISCVISelDAGToDAG.cpp:3502
void selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered)
Definition RISCVISelDAGToDAG.cpp:417
void selectVLSEGFF(SDNode *Node, unsigned NF, bool IsMasked)
Definition RISCVISelDAGToDAG.cpp:313
bool selectVSplatSimm5Plus1NoDec(SDValue N, SDValue &SplatVal)
Definition RISCVISelDAGToDAG.cpp:4330
bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2)
Definition RISCVISelDAGToDAG.cpp:4214
void selectSF_VC_X_SE(SDNode *Node)
Definition RISCVISelDAGToDAG.cpp:895
bool orDisjoint(const SDNode *Node) const
Definition RISCVISelDAGToDAG.cpp:3891
bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal)
Definition RISCVISelDAGToDAG.cpp:4357
bool hasAllHUsers(SDNode *Node) const
bool SelectInlineAsmMemoryOperand(const SDValue &Op, InlineAsm::ConstraintCode ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
Definition RISCVISelDAGToDAG.cpp:2968
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
Definition RISCVISelDAGToDAG.cpp:4318
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
Definition RISCVISelDAGToDAG.cpp:4419
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
Definition RISCVISelDAGToDAG.cpp:2996
bool tryUnsignedBitfieldInsertInZero(SDNode *Node, const SDLoc &DL, MVT VT, SDValue X, unsigned Msb, unsigned Lsb)
Definition RISCVISelDAGToDAG.cpp:801
bool hasAllWUsers(SDNode *Node) const
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
Definition RISCVISelDAGToDAG.cpp:42
bool selectInvLogicImm(SDValue N, SDValue &Val)
Definition RISCVISelDAGToDAG.cpp:3960
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
Definition RISCVISelDAGToDAG.cpp:3148
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
Definition RISCVISelDAGToDAG.cpp:1008
void selectXSfmmVSET(SDNode *Node)
Definition RISCVISelDAGToDAG.cpp:521
bool trySignedBitfieldInsertInSign(SDNode *Node)
Definition RISCVISelDAGToDAG.cpp:720
bool selectVSplat(SDValue N, SDValue &SplatVal)
Definition RISCVISelDAGToDAG.cpp:4274
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
Definition RISCVISelDAGToDAG.cpp:245
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
Definition RISCVISelDAGToDAG.cpp:148
bool SelectAddrRegImm9(SDValue Addr, SDValue &Base, SDValue &Offset)
Similar to SelectAddrRegImm, except that the offset is restricted to uimm9.
Definition RISCVISelDAGToDAG.cpp:3253
bool selectScalarFPAsInt(SDValue N, SDValue &Imm)
Definition RISCVISelDAGToDAG.cpp:4383
bool hasAllBUsers(SDNode *Node) const
void selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided)
Definition RISCVISelDAGToDAG.cpp:285
bool tryShrinkShlLogicImm(SDNode *Node)
Definition RISCVISelDAGToDAG.cpp:559
void selectVSETVLI(SDNode *Node)
Definition RISCVISelDAGToDAG.cpp:463
bool selectVLOp(SDValue N, SDValue &VL)
Definition RISCVISelDAGToDAG.cpp:4235
bool trySignedBitfieldExtract(SDNode *Node)
Definition RISCVISelDAGToDAG.cpp:637
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
Definition RISCVISelDAGToDAG.cpp:4323
void selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided)
Definition RISCVISelDAGToDAG.cpp:392
bool selectVSplatImm64Neg(SDValue N, SDValue &SplatVal)
Definition RISCVISelDAGToDAG.cpp:4352
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
Definition RISCVISelDAGToDAG.cpp:4337
bool tryUnsignedBitfieldExtract(SDNode *Node, const SDLoc &DL, MVT VT, SDValue X, unsigned Msb, unsigned Lsb)
Definition RISCVISelDAGToDAG.cpp:773
void selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered)
Definition RISCVISelDAGToDAG.cpp:344
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
Definition RISCVISelDAGToDAG.cpp:3515
bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD.
Definition RISCVISelDAGToDAG.cpp:3727
bool tryIndexedLoad(SDNode *Node)
Definition RISCVISelDAGToDAG.cpp:821
bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount, SDValue &Base, SDValue &Index, SDValue &Scale)
Definition RISCVISelDAGToDAG.cpp:3400
bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal)
Definition RISCVISelDAGToDAG.cpp:4345
bool hasShlAdd(int64_t ShAmt) const
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVVType::VLMUL getLMUL(MVT VT)
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
iterator_range< user_iterator > users()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
SelectionDAGISelLegacy(char &ID, std::unique_ptr< SelectionDAGISel > S)
const TargetLowering * TLI
const TargetInstrInfo * TII
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOptLevel OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
static constexpr unsigned MaxRecursionDepth
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
ilist< SDNode >::iterator allnodes_iterator
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
A Use represents the edge between a Value definition and its users.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
iterator_range< user_iterator > users()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with intege...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI, bool CompressionCost, bool FreeZeroes)
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
SmallVector< Inst, 8 > InstSeq
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned encodeXSfmmVType(unsigned SEW, unsigned Widen, bool AltFmt)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
static unsigned decodeTWiden(unsigned TWiden)
LLVM_ABI unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic, bool AltFmt=false)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
static constexpr unsigned RVVBitsPerBlock
static constexpr int64_t VLMaxSentinel
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
decltype(auto) dyn_cast(const From &Val)
dyn_cast - Return the argument parameter cast to the specified type.
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
static const MachineMemOperand::Flags MONontemporalBit0
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
unsigned M1(unsigned Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
CodeGenOptLevel
Code generation optimization level.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa - Return true if the parameter to the template is an instance of one of the template type argu...
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOptLevel OptLevel)
Definition RISCVISelDAGToDAG.cpp:4629
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
This class contains a discriminated union of information about pointers in memory operands,...
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.