LLVM: lib/Target/Mips/MipsISelLowering.cpp Source File
69#include
70#include
71#include
72#include
73#include
74#include
75#include
76#include
77#include
78#include
79
80using namespace llvm;
81
82#define DEBUG_TYPE "mips-lower"
83
84STATISTIC(NumTailCalls, "Number of tail calls");
85
88 cl::desc("MIPS: Don't trap on integer division by zero."),
90
92
94 Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
95 Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
96};
97
98
99
102 EVT VT) const {
105
108 : MVT::i64;
110}
111
114 EVT VT) const {
120 }
122}
123
126 unsigned &NumIntermediates, MVT &RegisterVT) const {
129 RegisterVT = IntermediateVT.getSimpleVT();
131 return NumIntermediates;
132 }
136 return NumIntermediates * getNumRegisters(Context, IntermediateVT);
137}
138
144
147 unsigned Flag) const {
149}
150
153 unsigned Flag) const {
155}
156
159 unsigned Flag) const {
161}
162
165 unsigned Flag) const {
167}
168
171 unsigned Flag) const {
173 N->getOffset(), Flag);
174}
175
179
180
183
184
188
189
194 }
195
196
197
201 }
202
203
208 }
209
212
214
215
216
217
218
220
221
241
246
250
251
265 } else {
268 }
269
280 } else {
283 }
290 }
291
296 }
297
301
310
311
325
329 } else {
332 }
339
342
345
362
363
368
370
375
376
379
383 }
384
388 }
389
390
395
400
406 } else if (Subtarget.isGP64bit()) {
411 }
412
414
418
421 else
423
425
426
427
430
432
434
435 isMicroMips = Subtarget.inMicroMipsMode();
436}
437
446
447
453
454
458
459
460
461 if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
463 UseFastISel = false;
464
466}
467
469 EVT VT) const {
471 return MVT::i32;
473}
474
480
481 EVT Ty = N->getValueType(0);
482 unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
483 unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
484 unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
485 MipsISD::DivRemU16;
487
489 N->getOperand(0), N->getOperand(1));
492
493
494 if (N->hasAnyUseOfValue(0)) {
496 InGlue);
498 InChain = CopyFromLo.getValue(1);
499 InGlue = CopyFromLo.getValue(2);
500 }
501
502
503 if (N->hasAnyUseOfValue(1)) {
505 HI, Ty, InGlue);
507 }
508
510}
511
513 switch (CC) {
535 }
536}
537
538
539
542 return false;
543
545 "Illegal Condition Code");
546
547 return true;
548}
549
550
551
553
556 return Op;
557
559
560 if (!LHS.getValueType().isFloatingPoint())
561 return Op;
562
565
566
567
569
570 return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
572}
573
574
580
581 return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
583}
584
590
591 SDValue SetCC = N->getOperand(0);
592
596
597 SDValue False = N->getOperand(2);
599
602
604
605
606
607
608
609
610
611
612 if (!FalseC)
614
616
619 SDValue True = N->getOperand(1);
620
624
626 }
627
628
629
630 SDValue True = N->getOperand(1);
632
635
636
637
638
641
643
644
645
646
647 if (Diff == 1)
649
650
651
652
653
654 if (Diff == -1) {
660 }
661
662
664}
665
671
672 SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
673
677
678
679
680
681
682
683
684
685 unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F :
686 MipsISD::CMovFP_T;
687
688 SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
689 return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
690 ValueIfFalse, FCC, ValueIfTrue, Glue);
691}
692
698
699 SDValue FirstOperand = N->getOperand(0);
700 unsigned FirstOperandOpc = FirstOperand.getOpcode();
701 SDValue Mask = N->getOperand(1);
702 EVT ValTy = N->getValueType(0);
704
706 unsigned SMPos, SMSize;
709 unsigned Opc;
710
711
715
716 if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
717
718
719
720
721
724
726
727
728
729 if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
731
732 Opc = MipsISD::Ext;
733 NewOperand = FirstOperand.getOperand(0);
734 } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
735
736
737
738
739
740
741
744
746
747 if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
748 Pos + SMSize > ValTy.getSizeInBits())
750
751 NewOperand = FirstOperand.getOperand(0);
752
753 SMSize--;
754 Opc = MipsISD::CIns;
755 } else {
756
757
758
759
760
763
764
765 if (SMPos)
767
768 Opc = MipsISD::Ext;
769 NewOperand = FirstOperand;
770 }
771 return DAG.getNode(Opc, DL, ValTy, NewOperand,
774}
775
781
782 SDValue FirstOperand = N->getOperand(0), SecondOperand = N->getOperand(1);
783 unsigned SMPos0, SMSize0, SMPos1, SMSize1;
785
787 SecondOperand.getOpcode() == ISD::SHL) ||
789 SecondOperand.getOpcode() == ISD::AND)) {
790
791
792
793
794
795
800 ? SecondOperand.getOperand(0)
808
810 ? SecondOperand.getOperand(1)
815
816 if (SMPos0 != 0 || SMSize0 != ShlShiftValue)
818
820 EVT ValTy = N->getValueType(0);
821 SMPos1 = ShlShiftValue;
822 assert(SMPos1 < ValTy.getSizeInBits());
823 SMSize1 = (ValTy == MVT::i64 ? 64 : 32) - SMPos1;
824 return DAG.getNode(MipsISD::Ins, DL, ValTy, ShlOperand0,
826 DAG.getConstant(SMSize1, DL, MVT::i32), AndOperand0);
827 }
828
829
832
833
834
835
836
840
841
842 if (SecondOperand.getOpcode() == ISD::AND &&
843 SecondOperand.getOperand(0).getOpcode() == ISD::SHL) {
844
848
849
850 if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
852
854
857
859
860
861
862 EVT ValTy = N->getValueType(0);
863 if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
865
871 } else {
872
873
874
875
876 if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
877 ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
878 (SMSize0 + SMPos0 <= 32))) {
879
880 bool isConstCase = SecondOperand.getOpcode() != ISD::AND;
881 if (SecondOperand.getOpcode() == ISD::AND) {
884 } else {
887 }
888
889
892
894 EVT ValTy = N->getOperand(0)->getValueType(0);
897 if (!isConstCase) {
900 SecondOperand, Const1);
901 }
903 MipsISD::Ins, DL, N->getValueType(0),
904 isConstCase
906 : SrlX,
908 DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
909 : SMSize0,
910 DL, MVT::i32),
912 }
914 }
915}
916
919
920
924
925
926
927
928
932
933
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
958
962
966
967
968
971
972
973
974
975
976
979
984
985 if (!IsSigned && !IsUnsigned)
987
988
990 SDValue BottomHalf, TopHalf;
991 std::tie(BottomHalf, TopHalf) =
992 CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);
994 CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);
995
996
998 unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
999 : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
1003 SDValue MAdd = CurDAG.getNode(Opcode, DL, MVT::Untyped, MAddOps);
1004
1005 SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
1006 SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
1009 return Combined;
1010}
1011
1015
1018 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
1020
1022 }
1023
1025}
1026
1030
1033 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
1035
1037 }
1038
1039
1040
1041
1042
1043 SDValue InnerAdd = N->getOperand(1);
1044 SDValue Index = N->getOperand(0);
1049
1052 if (Lo.getOpcode() != MipsISD::Lo)
1054
1055 if ((Lo.getOpcode() != MipsISD::Lo) ||
1058
1059 EVT ValTy = N->getValueType(0);
1061
1064}
1065
1069
1070
1071
1072
1075
1076 SDValue FirstOperand = N->getOperand(0);
1077 unsigned FirstOperandOpc = FirstOperand.getOpcode();
1078 SDValue SecondOperand = N->getOperand(1);
1079 EVT ValTy = N->getValueType(0);
1081
1083 unsigned SMPos, SMSize;
1086
1087
1090
1092
1093 if (Pos >= ValTy.getSizeInBits())
1095
1096 if (FirstOperandOpc != ISD::AND)
1098
1099
1103
1104
1105
1106 if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
1108
1109 NewOperand = FirstOperand.getOperand(0);
1110
1111 SMSize--;
1112
1113 return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
1116}
1117
1123 }
1124
1125 SDValue N0 = N->getOperand(0);
1126 EVT VT = N->getValueType(0);
1127
1128
1129
1130
1136
1140
1141 int64_t ConstImm = ConstantOperand->getSExtValue();
1144 }
1145
1147}
1148
1150 const {
1152 unsigned Opc = N->getOpcode();
1153
1154 switch (Opc) {
1155 default: break;
1161 case MipsISD::CMovFP_F:
1162 case MipsISD::CMovFP_T:
1176 }
1177
1179}
1180
1184
1188
1190
1191
1192
1194 return C->getAPIntValue().ule(15);
1195
1196 return false;
1197}
1198
1202 N->getOperand(0).getOpcode() == ISD::SRL) ||
1204 N->getOperand(0).getOpcode() == ISD::SHL)) &&
1205 "Expected shift-shift mask");
1206
1207 if (N->getOperand(0).getValueType().isVector())
1208 return false;
1209 return true;
1210}
1211
1212void
1218
1221{
1222 switch (Op.getOpcode())
1223 {
1231 case ISD::SETCC: return lowerSETCC(Op, DAG);
1234 return lowerFSETCC(Op, DAG);
1236 case ISD::VAARG: return lowerVAARG(Op, DAG);
1238 case ISD::FABS: return lowerFABS(Op, DAG);
1240 return lowerFCANONICALIZE(Op, DAG);
1246 case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true);
1247 case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, false);
1253 return lowerSTRICT_FP_TO_INT(Op, DAG);
1256 return lowerREADCYCLECOUNTER(Op, DAG);
1257 }
1259}
1260
1261
1262
1263
1264
1265
1266
1267
1268static unsigned
1270{
1273 return VReg;
1274}
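// insertDivByZeroTrap: after a DIV/DIVU (or their 64-bit forms), emit a TEQ
// against $zero so the hardware traps when the divisor is zero. The check is
// skipped entirely when -mno-check-zero-division is given.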
1275
1279 bool Is64Bit, bool IsMicroMips) {
1281 return &MBB;
1282
1283
1287 MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
1288 TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
1292
1293
1294 if (Is64Bit)
1296
1297
1299
1300
1301
1302
1303 return &MBB;
1304}
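// EmitInstrWithCustomInserter: expand the pseudo instructions marked
// usesCustomInserter - the ATOMIC_* pseudos, the divide pseudos (which get a
// divide-by-zero trap), the PseudoSELECT families, and the MSA LDR/STR pseudos.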
1305
1309 switch (MI.getOpcode()) {
1310 default:
1312 case Mips::ATOMIC_LOAD_ADD_I8:
1313 return emitAtomicBinaryPartword(MI, BB, 1);
1314 case Mips::ATOMIC_LOAD_ADD_I16:
1315 return emitAtomicBinaryPartword(MI, BB, 2);
1316 case Mips::ATOMIC_LOAD_ADD_I32:
1317 return emitAtomicBinary(MI, BB);
1318 case Mips::ATOMIC_LOAD_ADD_I64:
1319 return emitAtomicBinary(MI, BB);
1320
1321 case Mips::ATOMIC_LOAD_AND_I8:
1322 return emitAtomicBinaryPartword(MI, BB, 1);
1323 case Mips::ATOMIC_LOAD_AND_I16:
1324 return emitAtomicBinaryPartword(MI, BB, 2);
1325 case Mips::ATOMIC_LOAD_AND_I32:
1326 return emitAtomicBinary(MI, BB);
1327 case Mips::ATOMIC_LOAD_AND_I64:
1328 return emitAtomicBinary(MI, BB);
1329
1330 case Mips::ATOMIC_LOAD_OR_I8:
1331 return emitAtomicBinaryPartword(MI, BB, 1);
1332 case Mips::ATOMIC_LOAD_OR_I16:
1333 return emitAtomicBinaryPartword(MI, BB, 2);
1334 case Mips::ATOMIC_LOAD_OR_I32:
1335 return emitAtomicBinary(MI, BB);
1336 case Mips::ATOMIC_LOAD_OR_I64:
1337 return emitAtomicBinary(MI, BB);
1338
1339 case Mips::ATOMIC_LOAD_XOR_I8:
1340 return emitAtomicBinaryPartword(MI, BB, 1);
1341 case Mips::ATOMIC_LOAD_XOR_I16:
1342 return emitAtomicBinaryPartword(MI, BB, 2);
1343 case Mips::ATOMIC_LOAD_XOR_I32:
1344 return emitAtomicBinary(MI, BB);
1345 case Mips::ATOMIC_LOAD_XOR_I64:
1346 return emitAtomicBinary(MI, BB);
1347
1348 case Mips::ATOMIC_LOAD_NAND_I8:
1349 return emitAtomicBinaryPartword(MI, BB, 1);
1350 case Mips::ATOMIC_LOAD_NAND_I16:
1351 return emitAtomicBinaryPartword(MI, BB, 2);
1352 case Mips::ATOMIC_LOAD_NAND_I32:
1353 return emitAtomicBinary(MI, BB);
1354 case Mips::ATOMIC_LOAD_NAND_I64:
1355 return emitAtomicBinary(MI, BB);
1356
1357 case Mips::ATOMIC_LOAD_SUB_I8:
1358 return emitAtomicBinaryPartword(MI, BB, 1);
1359 case Mips::ATOMIC_LOAD_SUB_I16:
1360 return emitAtomicBinaryPartword(MI, BB, 2);
1361 case Mips::ATOMIC_LOAD_SUB_I32:
1362 return emitAtomicBinary(MI, BB);
1363 case Mips::ATOMIC_LOAD_SUB_I64:
1364 return emitAtomicBinary(MI, BB);
1365
1366 case Mips::ATOMIC_SWAP_I8:
1367 return emitAtomicBinaryPartword(MI, BB, 1);
1368 case Mips::ATOMIC_SWAP_I16:
1369 return emitAtomicBinaryPartword(MI, BB, 2);
1370 case Mips::ATOMIC_SWAP_I32:
1371 return emitAtomicBinary(MI, BB);
1372 case Mips::ATOMIC_SWAP_I64:
1373 return emitAtomicBinary(MI, BB);
1374
1375 case Mips::ATOMIC_CMP_SWAP_I8:
1376 return emitAtomicCmpSwapPartword(MI, BB, 1);
1377 case Mips::ATOMIC_CMP_SWAP_I16:
1378 return emitAtomicCmpSwapPartword(MI, BB, 2);
1379 case Mips::ATOMIC_CMP_SWAP_I32:
1380 return emitAtomicCmpSwap(MI, BB);
1381 case Mips::ATOMIC_CMP_SWAP_I64:
1382 return emitAtomicCmpSwap(MI, BB);
1383
1384 case Mips::ATOMIC_LOAD_MIN_I8:
1385 return emitAtomicBinaryPartword(MI, BB, 1);
1386 case Mips::ATOMIC_LOAD_MIN_I16:
1387 return emitAtomicBinaryPartword(MI, BB, 2);
1388 case Mips::ATOMIC_LOAD_MIN_I32:
1389 return emitAtomicBinary(MI, BB);
1390 case Mips::ATOMIC_LOAD_MIN_I64:
1391 return emitAtomicBinary(MI, BB);
1392
1393 case Mips::ATOMIC_LOAD_MAX_I8:
1394 return emitAtomicBinaryPartword(MI, BB, 1);
1395 case Mips::ATOMIC_LOAD_MAX_I16:
1396 return emitAtomicBinaryPartword(MI, BB, 2);
1397 case Mips::ATOMIC_LOAD_MAX_I32:
1398 return emitAtomicBinary(MI, BB);
1399 case Mips::ATOMIC_LOAD_MAX_I64:
1400 return emitAtomicBinary(MI, BB);
1401
1402 case Mips::ATOMIC_LOAD_UMIN_I8:
1403 return emitAtomicBinaryPartword(MI, BB, 1);
1404 case Mips::ATOMIC_LOAD_UMIN_I16:
1405 return emitAtomicBinaryPartword(MI, BB, 2);
1406 case Mips::ATOMIC_LOAD_UMIN_I32:
1407 return emitAtomicBinary(MI, BB);
1408 case Mips::ATOMIC_LOAD_UMIN_I64:
1409 return emitAtomicBinary(MI, BB);
1410
1411 case Mips::ATOMIC_LOAD_UMAX_I8:
1412 return emitAtomicBinaryPartword(MI, BB, 1);
1413 case Mips::ATOMIC_LOAD_UMAX_I16:
1414 return emitAtomicBinaryPartword(MI, BB, 2);
1415 case Mips::ATOMIC_LOAD_UMAX_I32:
1416 return emitAtomicBinary(MI, BB);
1417 case Mips::ATOMIC_LOAD_UMAX_I64:
1418 return emitAtomicBinary(MI, BB);
1419
1420 case Mips::PseudoSDIV:
1421 case Mips::PseudoUDIV:
1422 case Mips::DIV:
1423 case Mips::DIVU:
1424 case Mips::MOD:
1425 case Mips::MODU:
1427 false);
1428 case Mips::SDIV_MM_Pseudo:
1429 case Mips::UDIV_MM_Pseudo:
1430 case Mips::SDIV_MM:
1431 case Mips::UDIV_MM:
1432 case Mips::DIV_MMR6:
1433 case Mips::DIVU_MMR6:
1434 case Mips::MOD_MMR6:
1435 case Mips::MODU_MMR6:
1437 case Mips::PseudoDSDIV:
1438 case Mips::PseudoDUDIV:
1439 case Mips::DDIV:
1440 case Mips::DDIVU:
1441 case Mips::DMOD:
1442 case Mips::DMODU:
1444
1445 case Mips::PseudoSELECT_I:
1446 case Mips::PseudoSELECT_I64:
1447 case Mips::PseudoSELECT_S:
1448 case Mips::PseudoSELECT_D32:
1449 case Mips::PseudoSELECT_D64:
1450 return emitPseudoSELECT(MI, BB, false, Mips::BNE);
1451 case Mips::PseudoSELECTFP_F_I:
1452 case Mips::PseudoSELECTFP_F_I64:
1453 case Mips::PseudoSELECTFP_F_S:
1454 case Mips::PseudoSELECTFP_F_D32:
1455 case Mips::PseudoSELECTFP_F_D64:
1456 return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
1457 case Mips::PseudoSELECTFP_T_I:
1458 case Mips::PseudoSELECTFP_T_I64:
1459 case Mips::PseudoSELECTFP_T_S:
1460 case Mips::PseudoSELECTFP_T_D32:
1461 case Mips::PseudoSELECTFP_T_D64:
1462 return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
1463 case Mips::PseudoD_SELECT_I:
1464 case Mips::PseudoD_SELECT_I64:
1465 return emitPseudoD_SELECT(MI, BB);
1466 case Mips::LDR_W:
1467 return emitLDR_W(MI, BB);
1468 case Mips::LDR_D:
1469 return emitLDR_D(MI, BB);
1470 case Mips::STR_W:
1471 return emitSTR_W(MI, BB);
1472 case Mips::STR_D:
1473 return emitSTR_D(MI, BB);
1474 }
1475}
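// emitAtomicBinary: rewrite a 32/64-bit atomic read-modify-write pseudo into
// its *_POSTRA counterpart. The LL/SC retry loop itself is emitted after
// register allocation; here we only pick the post-RA opcode and add the
// scratch register operands it needs.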
1476
1477
1478
1480MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
1482
1487
1488 unsigned AtomicOp;
1489 bool NeedsAdditionalReg = false;
1490 switch (MI.getOpcode()) {
1491 case Mips::ATOMIC_LOAD_ADD_I32:
1492 AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
1493 break;
1494 case Mips::ATOMIC_LOAD_SUB_I32:
1495 AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
1496 break;
1497 case Mips::ATOMIC_LOAD_AND_I32:
1498 AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
1499 break;
1500 case Mips::ATOMIC_LOAD_OR_I32:
1501 AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
1502 break;
1503 case Mips::ATOMIC_LOAD_XOR_I32:
1504 AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
1505 break;
1506 case Mips::ATOMIC_LOAD_NAND_I32:
1507 AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
1508 break;
1509 case Mips::ATOMIC_SWAP_I32:
1510 AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
1511 break;
1512 case Mips::ATOMIC_LOAD_ADD_I64:
1513 AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
1514 break;
1515 case Mips::ATOMIC_LOAD_SUB_I64:
1516 AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
1517 break;
1518 case Mips::ATOMIC_LOAD_AND_I64:
1519 AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
1520 break;
1521 case Mips::ATOMIC_LOAD_OR_I64:
1522 AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
1523 break;
1524 case Mips::ATOMIC_LOAD_XOR_I64:
1525 AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
1526 break;
1527 case Mips::ATOMIC_LOAD_NAND_I64:
1528 AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
1529 break;
1530 case Mips::ATOMIC_SWAP_I64:
1531 AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
1532 break;
1533 case Mips::ATOMIC_LOAD_MIN_I32:
1534 AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
1535 NeedsAdditionalReg = true;
1536 break;
1537 case Mips::ATOMIC_LOAD_MAX_I32:
1538 AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
1539 NeedsAdditionalReg = true;
1540 break;
1541 case Mips::ATOMIC_LOAD_UMIN_I32:
1542 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
1543 NeedsAdditionalReg = true;
1544 break;
1545 case Mips::ATOMIC_LOAD_UMAX_I32:
1546 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
1547 NeedsAdditionalReg = true;
1548 break;
1549 case Mips::ATOMIC_LOAD_MIN_I64:
1550 AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
1551 NeedsAdditionalReg = true;
1552 break;
1553 case Mips::ATOMIC_LOAD_MAX_I64:
1554 AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
1555 NeedsAdditionalReg = true;
1556 break;
1557 case Mips::ATOMIC_LOAD_UMIN_I64:
1558 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
1559 NeedsAdditionalReg = true;
1560 break;
1561 case Mips::ATOMIC_LOAD_UMAX_I64:
1562 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
1563 NeedsAdditionalReg = true;
1564 break;
1565 default:
1567 }
1568
1569 Register OldVal = MI.getOperand(0).getReg();
1570 Register Ptr = MI.getOperand(1).getReg();
1571 Register Incr = MI.getOperand(2).getReg();
1573
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1613
1616
1624 if (NeedsAdditionalReg) {
1626 RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1629 }
1630
1631 MI.eraseFromParent();
1632
1633 return BB;
1634}
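// emitSignExtendToI32InReg: sign-extend a subword (i8/i16) value inside a
// 32-bit register, using SEB/SEH when the ISA provides them and an SLL/SRA
// shift pair otherwise.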
1635
1638 unsigned SrcReg) const {
1639 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1641
1644 return BB;
1645 }
1646
1649 return BB;
1650 }
1651
1652 MachineFunction *MF = BB->getParent();
1653 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1654 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1656
1658 int64_t ShiftImm = 32 - (Size * 8);
1659
1662
1663 return BB;
1664}
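// emitAtomicBinaryPartword: i8/i16 atomics operate on the aligned word that
// contains the byte/halfword. The aligned address, shift amount and mask are
// computed here, then handed to the matching *_POSTRA pseudo, which expands
// to the LL/SC loop after register allocation.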
1665
1669 "Unsupported size for EmitAtomicBinaryPartial.");
1670
1671 MachineFunction *MF = BB->getParent();
1672 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1673 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1674 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1675 const TargetRegisterClass *RCp =
1677 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1679
1680 Register Dest = MI.getOperand(0).getReg();
1681 Register Ptr = MI.getOperand(1).getReg();
1682 Register Incr = MI.getOperand(2).getReg();
1683
1695
1696 unsigned AtomicOp = 0;
1697 bool NeedsAdditionalReg = false;
1698 switch (MI.getOpcode()) {
1699 case Mips::ATOMIC_LOAD_NAND_I8:
1700 AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
1701 break;
1702 case Mips::ATOMIC_LOAD_NAND_I16:
1703 AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
1704 break;
1705 case Mips::ATOMIC_SWAP_I8:
1706 AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
1707 break;
1708 case Mips::ATOMIC_SWAP_I16:
1709 AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
1710 break;
1711 case Mips::ATOMIC_LOAD_ADD_I8:
1712 AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
1713 break;
1714 case Mips::ATOMIC_LOAD_ADD_I16:
1715 AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
1716 break;
1717 case Mips::ATOMIC_LOAD_SUB_I8:
1718 AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
1719 break;
1720 case Mips::ATOMIC_LOAD_SUB_I16:
1721 AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
1722 break;
1723 case Mips::ATOMIC_LOAD_AND_I8:
1724 AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
1725 break;
1726 case Mips::ATOMIC_LOAD_AND_I16:
1727 AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
1728 break;
1729 case Mips::ATOMIC_LOAD_OR_I8:
1730 AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
1731 break;
1732 case Mips::ATOMIC_LOAD_OR_I16:
1733 AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
1734 break;
1735 case Mips::ATOMIC_LOAD_XOR_I8:
1736 AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
1737 break;
1738 case Mips::ATOMIC_LOAD_XOR_I16:
1739 AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
1740 break;
1741 case Mips::ATOMIC_LOAD_MIN_I8:
1742 AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
1743 NeedsAdditionalReg = true;
1744 break;
1745 case Mips::ATOMIC_LOAD_MIN_I16:
1746 AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
1747 NeedsAdditionalReg = true;
1748 break;
1749 case Mips::ATOMIC_LOAD_MAX_I8:
1750 AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
1751 NeedsAdditionalReg = true;
1752 break;
1753 case Mips::ATOMIC_LOAD_MAX_I16:
1754 AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
1755 NeedsAdditionalReg = true;
1756 break;
1757 case Mips::ATOMIC_LOAD_UMIN_I8:
1758 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
1759 NeedsAdditionalReg = true;
1760 break;
1761 case Mips::ATOMIC_LOAD_UMIN_I16:
1762 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
1763 NeedsAdditionalReg = true;
1764 break;
1765 case Mips::ATOMIC_LOAD_UMAX_I8:
1766 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
1767 NeedsAdditionalReg = true;
1768 break;
1769 case Mips::ATOMIC_LOAD_UMAX_I16:
1770 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
1771 NeedsAdditionalReg = true;
1772 break;
1773 default:
1774 llvm_unreachable("Unknown subword atomic pseudo for expansion!");
1775 }
1776
1777
1781 MF->insert(It, exitMBB);
1782
1783
1787
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1801 BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
1803 BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
1805 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1806 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1809 } else {
1814 }
1815 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1821
1822
1823
1824
1825
1826
1827 MachineInstrBuilder MIB =
1830 .addReg(AlignedAddr)
1841 if (NeedsAdditionalReg) {
1845 }
1846
1847 MI.eraseFromParent();
1848
1849 return exitMBB;
1850}
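// emitAtomicCmpSwap: expand 32/64-bit ATOMIC_CMP_SWAP into its post-RA pseudo.
// Ptr, OldVal and NewVal are first copied into fresh virtual registers because
// the post-RA expansion is allowed to clobber its operands.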
1851
1852
1853
1854
1855
1856
1858MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
1860
1861 assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
1862 MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
1863 "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
1864
1865 const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
1866
1867 MachineFunction *MF = BB->getParent();
1870 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1872
1873 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
1874 ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
1875 : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
1876 Register Dest = MI.getOperand(0).getReg();
1877 Register Ptr = MI.getOperand(1).getReg();
1878 Register OldVal = MI.getOperand(2).getReg();
1879 Register NewVal = MI.getOperand(3).getReg();
1880
1881 Register Scratch = MRI.createVirtualRegister(RC);
1883
1884
1885
1886
1887
1888
1889 Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
1890 Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
1891 Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
1892
1896
1897
1898
1899
1900
1908
1909 MI.eraseFromParent();
1910
1911 return BB;
1912}
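// emitAtomicCmpSwapPartword: the same masking scheme as
// emitAtomicBinaryPartword, applied to i8/i16 compare-and-swap. The compare
// and new values are masked and shifted into position before the *_POSTRA
// pseudo is emitted.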
1913
1914MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
1917 "Unsupported size for EmitAtomicCmpSwapPartial.");
1918
1919 MachineFunction *MF = BB->getParent();
1920 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1921 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1922 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1923 const TargetRegisterClass *RCp =
1925 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1927
1928 Register Dest = MI.getOperand(0).getReg();
1929 Register Ptr = MI.getOperand(1).getReg();
1930 Register CmpVal = MI.getOperand(2).getReg();
1931 Register NewVal = MI.getOperand(3).getReg();
1932
1944 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1945 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1946 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1960
1961
1965 MF->insert(It, exitMBB);
1966
1967
1971
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1988 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
1990 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
1992 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1993 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1996 } else {
2001 }
2002 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
2007 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
2009 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
2011 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
2013 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
2015
2016
2017
2018
2019
2022 .addReg(AlignedAddr)
2024 .addReg(ShiftedCmpVal)
2026 .addReg(ShiftedNewVal)
2032
2033 MI.eraseFromParent();
2034
2035 return exitMBB;
2036}
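// lowerREADCYCLECOUNTER: implement READCYCLECOUNTER with an RDHWR (RDHWR64
// when pointers are 64-bit) read of the hardware cycle-count register, copied
// out through a register so the counter value and the chain can be returned
// together.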
2037
2038SDValue MipsTargetLowering::lowerREADCYCLECOUNTER(SDValue Op,
2043 unsigned RdhwrOpc, DestReg;
2045
2046 if (PtrVT == MVT::i64) {
2047 RdhwrOpc = Mips::RDHWR64;
2049 SDNode *Rdhwr = DAG.getMachineNode(RdhwrOpc, DL, MVT::i64, MVT::Glue,
2056 Results.push_back(ResNode);
2058 } else {
2059 RdhwrOpc = Mips::RDHWR;
2061 SDNode *Rdhwr = DAG.getMachineNode(RdhwrOpc, DL, MVT::i32, MVT::Glue,
2071 }
2072
2074}
2075
2077
2078
2079 SDValue Chain = Op.getOperand(0);
2080 SDValue Dest = Op.getOperand(2);
2082
2085
2086
2087 if (CondRes.getOpcode() != MipsISD::FPCmp)
2088 return Op;
2089
2095 return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
2096 FCC0, Dest, CondRes);
2097}
2098
2099SDValue MipsTargetLowering::
2101{
2104
2105
2106 if (Cond.getOpcode() != MipsISD::FPCmp)
2107 return Op;
2108
2110 SDLoc(Op));
2111}
2112
2116
2117 assert(Cond.getOpcode() == MipsISD::FPCmp &&
2118 "Floating point operand expected.");
2119
2123
2125}
2126
2129
2131 SDValue Chain = Op.getOperand(0);
2135
2141
2143}
2144
2147 EVT Ty = Op.getValueType();
2149 const GlobalValue *GV = N->getGlobal();
2150
2153 "Windows is the only supported COFF target");
2157 }
2158
2160 const MipsTargetObjectFile *TLOF =
2161 static_cast<const MipsTargetObjectFile *>(
2164 if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
2165
2167
2168
2170
2172 }
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2187
2193
2198}
2199
2203 EVT Ty = Op.getValueType();
2204
2208
2210}
2211
2212SDValue MipsTargetLowering::
2214{
2215
2216
2217
2218
2222
2223 SDLoc DL(GA);
2224 const GlobalValue *GV = GA->getGlobal();
2226
2228
2230
2233
2239
2241
2243 Args.emplace_back(Argument, PtrTy);
2244
2245 TargetLowering::CallLoweringInfo CLI(DAG);
2246 CLI.setDebugLoc(DL)
2248 .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
2249 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2250
2251 SDValue Ret = CallResult.first;
2252
2254 return Ret;
2255
2264 }
2265
2268
2272 TGA);
2275 } else {
2276
2285 }
2286
2287 SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
2289}
2290
2291SDValue MipsTargetLowering::
2293{
2295 EVT Ty = Op.getValueType();
2296
2300
2302}
2303
2304SDValue MipsTargetLowering::
2306{
2308 EVT Ty = Op.getValueType();
2309
2311 const MipsTargetObjectFile *TLOF =
2312 static_cast<const MipsTargetObjectFile *>(
2314
2317
2319
2322 }
2323
2325}
2326
2329 MipsFunctionInfo *FuncInfo = MF.getInfo();
2330
2334
2335
2336
2338 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2339 MachinePointerInfo(SV));
2340}
2341
2343 SDNode *Node = Op.getNode();
2344 EVT VT = Node->getValueType(0);
2346 SDValue VAListPtr = Node->getOperand(1);
2348 llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
2350 SDLoc DL(Node);
2351 unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
2352
2354 VAListPtr, MachinePointerInfo(SV));
2355 SDValue VAList = VAListLoad;
2356
2357
2358
2359
2360
2361
2362
2363
2368
2372 }
2373
2374
2376 unsigned ArgSizeInBytes =
2382
2384 MachinePointerInfo(SV));
2385
2386
2387
2388
2389
2390
2391 if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
2392 unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
2395 }
2396
2397 return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
2398}
2399
2401 bool HasExtractInsert) {
2402 EVT TyX = Op.getOperand(0).getValueType();
2403 EVT TyY = Op.getOperand(1).getValueType();
2408
2409
2410
2411 SDValue X = (TyX == MVT::f32) ?
2413 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2414 Const1);
2415 SDValue Y = (TyY == MVT::f32) ?
2417 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
2418 Const1);
2419
2420 if (HasExtractInsert) {
2421
2422
2423 SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
2424 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
2425 } else {
2426
2427
2428
2429
2430
2436 }
2437
2438 if (TyX == MVT::f32)
2440
2441 SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2442 Op.getOperand(0),
2444 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2445}
2446
2448 bool HasExtractInsert) {
2449 unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
2450 unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
2454
2455
2458
2459 if (HasExtractInsert) {
2460
2461
2463 DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);
2464
2465 if (WidthX > WidthY)
2467 else if (WidthY > WidthX)
2469
2471 DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
2472 X);
2474 }
2475
2476
2477
2478
2479
2480
2485
2486 if (WidthX > WidthY)
2488 else if (WidthY > WidthX)
2490
2495}
2496
2501
2503}
2504
2506 bool HasExtractInsert) const {
2509
2512
2513
2514
2515 SDValue X = (Op.getValueType() == MVT::f32)
2517 : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2518 Op.getOperand(0), Const1);
2519
2520
2521 if (HasExtractInsert)
2522 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
2525 else {
2526
2527
2530 }
2531
2532 if (Op.getValueType() == MVT::f32)
2534
2535
2536
2537
2538
2540 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2542 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2543}
2544
2546 bool HasExtractInsert) const {
2549
2552
2553
2555
2556
2557 if (HasExtractInsert)
2558 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
2559 DAG.getRegister(Mips::ZERO_64, MVT::i64),
2561 else {
2564 }
2565
2567}
2568
2570 if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
2571 return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());
2572
2573 return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
2574}
2575
2579 EVT VT = Op.getValueType();
2580 SDValue Operand = Op.getOperand(0);
2581 SDNodeFlags Flags = Op->getFlags();
2582
2584 return Operand;
2585
2588}
2589
2590SDValue MipsTargetLowering::
2592
2593 if (Op.getConstantOperandVal(0) != 0) {
2595 "return address can be determined only for current frame");
2597 }
2598
2601 EVT VT = Op.getValueType();
2604 DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
2605 return FrameAddr;
2606}
2607
2610
2611 if (Op.getConstantOperandVal(0) != 0) {
2613 "return address can be determined only for current frame");
2615 }
2616
2619 MVT VT = Op.getSimpleValueType();
2620 unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
2622
2623
2626}
2627
2628
2629
2630
2631
2633 const {
2635 MipsFunctionInfo *MipsFI = MF.getInfo();
2636
2638 SDValue Chain = Op.getOperand(0);
2640 SDValue Handler = Op.getOperand(2);
2642 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
2643
2644
2645
2646 unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
2647 unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
2650 return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
2654}
2655
2658
2659
2660 unsigned SType = 0;
2662 return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
2664}
2665
2669 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2670
2672 SDValue Shamt = Op.getOperand(2);
2673
2674
2675
2676
2677
2678
2693
2696}
2697
2699 bool IsSRA) const {
2702 SDValue Shamt = Op.getOperand(2);
2703 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2732
2734 SDVTList VTList = DAG.getVTList(VT, VT);
2737 DL, VTList, Cond, ShiftRightHi,
2739 ShiftRightHi);
2740 }
2741
2744 IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);
2745
2748}
2749
2752 SDValue Ptr = LD->getBasePtr();
2753 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
2757
2761
2762 SDValue Ops[] = { Chain, Ptr, Src };
2764 LD->getMemOperand());
2765}
2766
2767
2770 EVT MemVT = LD->getMemoryVT();
2771
2772 if (Subtarget.systemSupportsUnalignedAccess())
2773 return Op;
2774
2775
2776 if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
2777 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
2779
2780 bool IsLittle = Subtarget.isLittle();
2781 EVT VT = Op.getValueType();
2783 SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
2784
2785 assert((VT == MVT::i32) || (VT == MVT::i64));
2786
2787
2788
2789
2790
2791
2794 IsLittle ? 7 : 0);
2796 IsLittle ? 0 : 7);
2797 }
2798
2800 IsLittle ? 3 : 0);
2802 IsLittle ? 0 : 3);
2803
2804
2805
2806
2807
2808
2809
2810
2811 if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
2813 return LWR;
2814
2816
2817
2818
2819
2820
2821
2822
2823
2830}
2831
2847
2848
2850 bool IsLittle) {
2852 EVT VT = Value.getValueType();
2853
2854
2855
2856
2857
2858
2859
2862 IsLittle ? 3 : 0);
2863 return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
2864 }
2865
2866 assert(VT == MVT::i64);
2867
2868
2869
2870
2871
2872
2873 SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
2874 return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
2875}
2876
2877
2879 bool SingleFloat) {
2881
2885
2892}
2893
2897
2898
2899 if (!Subtarget.systemSupportsUnalignedAccess() &&
2901 ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
2903
2905}
2906
2909
2910
2911
2913 EVT ValTy = Op->getValueType(0);
2916}
2917
2922
2925 Op.getOperand(0));
2927}
2928
2929SDValue MipsTargetLowering::lowerSTRICT_FP_TO_INT(SDValue Op,
2931 assert(Op->isStrictFPOpcode());
2932 SDValue SrcVal = Op.getOperand(1);
2933 SDLoc Loc(Op);
2934
2938 Loc, Op.getValueType(), SrcVal);
2939
2941}
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
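// CC_MipsO32: hand-written O32 argument assignment. Integer arguments use
// A0-A3; the first two FP arguments use F12/F14 (or the F64Regs pair passed
// in) unless a varargs call or an earlier integer argument forces them into
// GPRs; i64 and f64 values passed in GPRs start at an even register (A0/A2).
// Whatever does not fit in registers is assigned a stack slot.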
2972 State.getMachineFunction().getSubtarget());
2973
2974 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
2975
2976 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
2977
2978 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
2979
2980
2982 return true;
2983
2984
2986 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
2987 LocVT = MVT::i32;
2988 if (ArgFlags.isSExt())
2990 else if (ArgFlags.isZExt())
2992 else
2994 }
2995 }
2996
2997
2998 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2999 LocVT = MVT::i32;
3000 if (ArgFlags.isSExt())
3002 else if (ArgFlags.isZExt())
3004 else
3006 }
3007
3008 unsigned Reg;
3009
3010
3011
3012
3013 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
3014 State.getFirstUnallocated(F32Regs) != ValNo;
3016 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
3018
3019
3020 if (ValVT == MVT::i32 && isVectorFloat) {
3021
3022
3023
3024
3025 if (ArgFlags.isSplit()) {
3026 Reg = State.AllocateReg(FloatVectorIntRegs);
3027 if (Reg == Mips::A2)
3028 State.AllocateReg(Mips::A1);
3029 else if (Reg == 0)
3030 State.AllocateReg(Mips::A3);
3031 } else {
3032
3033
3035 }
3036 } else if (ValVT == MVT::i32 ||
3037 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
3039
3040
3041 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
3043 LocVT = MVT::i32;
3044 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
3045
3046
3048 if (Reg == Mips::A1 || Reg == Mips::A3)
3050
3051 if (Reg) {
3052 LocVT = MVT::i32;
3053
3054 State.addLoc(
3058 State.addLoc(
3060 return false;
3061 }
3062 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
3063
3064 if (ValVT == MVT::f32) {
3066
3067 State.AllocateReg(IntRegs);
3068 } else {
3069 Reg = State.AllocateReg(F64Regs);
3070
3072 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
3073 State.AllocateReg(IntRegs);
3074 State.AllocateReg(IntRegs);
3075 }
3076 } else
3078
3079 if (!Reg) {
3080 unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
3082 } else
3084
3085 return false;
3086}
3087
3092 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
3093
3094 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,
3095 F64Regs);
3096}
3097
3102 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
3103
3104 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,
3105 F64Regs);
3106}
3107
3108[[maybe_unused]] static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
3112
3113#include "MipsGenCallingConv.inc"
3114
3116 return CC_Mips_FixedArg;
3117 }
3118
3120 return RetCC_Mips;
3121 }
3122
3123
3124
3125
3126SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
3128 const SDLoc &DL, bool IsTailCall,
3130 if (!IsTailCall) {
3135 }
3136
3142}
3143
3146 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
3147 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160 if (IsPICCall && !InternalLinkage && IsCallReloc) {
3161 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
3162 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
3163 RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
3164 }
3165
3166
3167
3168
3169
3171
3172 for (auto &R : RegsToPass) {
3173 Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InGlue);
3175 }
3176
3177
3178
3179 for (auto &R : RegsToPass)
3180 Ops.push_back(CLI.DAG.getRegister(R.first, R.second.getValueType()));
3181
3182
3186 assert(Mask && "Missing call preserved mask for calling convention");
3187 if (Subtarget.inMips16HardFloat()) {
3189 StringRef Sym = G->getGlobal()->getName();
3190 Function *F = G->getGlobal()->getParent()->getFunction(Sym);
3191 if (F && F->hasFnAttribute("__Mips16RetHelper")) {
3193 }
3194 }
3195 }
3197
3199 Ops.push_back(InGlue);
3200}
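// After instruction selection, annotate JALR-style calls whose callee symbol
// is known so the assembler emits an R_MIPS_JALR relocation, letting the
// linker relax the indirect call into a direct one. Calls against data
// symbols are skipped.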
3201
3204 switch (MI.getOpcode()) {
3205 default:
3206 return;
3207 case Mips::JALR:
3208 case Mips::JALRPseudo:
3209 case Mips::JALR64:
3210 case Mips::JALR64Pseudo:
3211 case Mips::JALR16_MM:
3212 case Mips::JALRC16_MMR6:
3213 case Mips::TAILCALLREG:
3214 case Mips::TAILCALLREG64:
3215 case Mips::TAILCALLR6REG:
3216 case Mips::TAILCALL64R6REG:
3217 case Mips::TAILCALLREG_MM:
3218 case Mips::TAILCALLREG_MMR6: {
3222 Node->getNumOperands() < 1 ||
3223 Node->getOperand(0).getNumOperands() < 2) {
3224 return;
3225 }
3226
3227
3228
3229 const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
3233
3234
3235
3237 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
3238 << G->getGlobal()->getName() << "\n");
3239 return;
3240 }
3241 Sym = G->getGlobal()->getName();
3242 }
3245 Sym = ES->getSymbol();
3246 }
3247
3248 if (Sym.empty())
3249 return;
3250
3253 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
3255 }
3256 }
3257}
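// LowerCall: lower an outgoing call. Arguments are assigned with CC_Mips,
// copied into registers or stored to the outgoing argument area, and the call
// itself becomes a MipsISD::JmpLink (or MipsISD::TailCall) bracketed by
// CALLSEQ_START/CALLSEQ_END; results come back through LowerCallResult.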
3258
3259
3260
3273 bool IsVarArg = CLI.IsVarArg;
3275
3281
3282
3287
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315 bool MemcpyInByVal = ES && StringRef(ES->getSymbol()) == "memcpy" &&
3318
3319
3320
3321 unsigned ReservedArgArea =
3322 MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
3323 CCInfo.AllocateStack(ReservedArgArea, Align(1));
3324
3325 CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
3326
3327
3328 unsigned StackSize = CCInfo.getStackSize();
3329
3330
3332
3335
3336
3337
3338 bool InternalLinkage = false;
3339 if (IsTailCall) {
3340 IsTailCall = isEligibleForTailCallOptimization(
3343 InternalLinkage = G->getGlobal()->hasInternalLinkage();
3344 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
3345 G->getGlobal()->hasPrivateLinkage() ||
3346 G->getGlobal()->hasHiddenVisibility() ||
3347 G->getGlobal()->hasProtectedVisibility());
3348 }
3349 }
3351 report_fatal_error("failed to perform tail call elimination on a call "
3352 "site marked musttail");
3353
3354 if (IsTailCall)
3355 ++NumTailCalls;
3356
3357
3358
3359
3361 StackSize = alignTo(StackSize, StackAlignment);
3362
3363 if (!(IsTailCall || MemcpyInByVal))
3365
3369 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
3371
3372 CCInfo.rewindByValRegsInfo();
3373
3374
3375 for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
3376 SDValue Arg = OutVals[OutIdx];
3377 CCValAssign &VA = ArgLocs[i];
3379 ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
3380 bool UseUpperBits = false;
3381
3382
3383 if (Flags.isByVal()) {
3384 unsigned FirstByValReg, LastByValReg;
3385 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3386 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3387
3389 "ByVal args of size 0 should have been ignored by front-end.");
3390 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3391 assert(!IsTailCall &&
3392 "Do not tail-call optimize if there is a byval argument.");
3393 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
3394 FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
3395 VA);
3396 CCInfo.nextInRegsParam();
3397 continue;
3398 }
3399
3400
3402 default:
3406 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
3407 (ValVT == MVT::f64 && LocVT == MVT::i64) ||
3408 (ValVT == MVT::i64 && LocVT == MVT::f64))
3410 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
3417
3419
3421 Register LocRegHigh = ArgLocs[++i].getLocReg();
3422 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
3423 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
3424 continue;
3425 }
3426 }
3427 break;
3430 break;
3432 UseUpperBits = true;
3433 [[fallthrough]];
3436 break;
3438 UseUpperBits = true;
3439 [[fallthrough]];
3442 break;
3444 UseUpperBits = true;
3445 [[fallthrough]];
3448 break;
3449 }
3450
3451 if (UseUpperBits) {
3452 unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
3457 }
3458
3459
3460
3462 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3463
3464
3465
3467 continue;
3468
3469
3471 if (Options.EmitCallSiteInfo)
3473
3474 continue;
3475 }
3476
3477
3479
3480
3481
3483 Chain, Arg, DL, IsTailCall, DAG));
3484 }
3485
3486
3487
3488 if (!MemOpChains.empty())
3490
3491
3492
3493
3494
3495 EVT Ty = Callee.getValueType();
3496 bool GlobalOrExternal = false, IsCallReloc = false;
3497
3498
3499
3500
3501 if (!Subtarget.isABICalls() && !IsPIC) {
3502
3503
3504
3511 bool UseLongCalls = Subtarget.useLongCalls();
3512
3513
3515 if (F->hasFnAttribute("long-call"))
3516 UseLongCalls = true;
3517 else if (F->hasFnAttribute("short-call"))
3518 UseLongCalls = false;
3519 }
3520 if (UseLongCalls)
3524 }
3525 }
3526
3529 G->getGlobal()->hasDLLImportStorageClass()) {
3531 "Windows is the only supported COFF target");
3532 auto PtrInfo = MachinePointerInfo();
3535 } else if (IsPIC) {
3536 const GlobalValue *Val = G->getGlobal();
3538
3539 if (InternalLinkage)
3545 IsCallReloc = true;
3546 } else {
3549 IsCallReloc = true;
3550 }
3551 } else
3555 GlobalOrExternal = true;
3556 }
3558 const char *Sym = S->getSymbol();
3559
3560 if (!IsPIC)
3567 IsCallReloc = true;
3568 } else {
3571 IsCallReloc = true;
3572 }
3573
3574 GlobalOrExternal = true;
3575 }
3576
3578 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3579
3580 getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
3581 IsCallReloc, CLI, Callee, Chain);
3582
3583 if (IsTailCall) {
3587 return Ret;
3588 }
3589
3590 Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
3592
3594
3595
3596
3597 if (!(MemcpyInByVal)) {
3598 Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);
3600 }
3601
3602
3603
3604 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
3605 InVals, CLI);
3606}
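// LowerCallResult: copy the values returned in physical registers into
// virtual registers, undoing any promotion (AssertSext/AssertZext, upper-bit
// shifts, bitcasts) that the return lowering applied.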
3607
3608
3609
3610SDValue MipsTargetLowering::LowerCallResult(
3615
3617 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3619
3620 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips);
3621
3622
3623 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3624 CCValAssign &VA = RVLocs[i];
3625 assert(VA.isRegLoc() && "Can only return in registers!");
3626
3628 RVLocs[i].getLocVT(), InGlue);
3631
3633 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
3635 unsigned Shift =
3640 }
3641
3643 default:
3646 break;
3649 break;
3653 break;
3659 break;
3665 break;
3666 }
3667
3669 }
3670
3671 return Chain;
3672}
3673
3679
3680
3682 default:
3683 break;
3689 unsigned Opcode =
3694 break;
3695 }
3696 }
3697
3698
3699
3700
3701
3703 default:
3706 break;
3710 break;
3715 break;
3720 break;
3723 break;
3724 }
3725
3726 return Val;
3727}
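// LowerFormalArguments: transform physical registers into virtual registers
// and generate load operations for arguments placed on the stack.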
3728
3729
3730
3731
3732
3733
3734SDValue MipsTargetLowering::LowerFormalArguments(
3740 MipsFunctionInfo *MipsFI = MF.getInfo();
3741
3743
3744
3745 std::vector OutChains;
3746
3747
3749 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
3751 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
3754
3755 if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
3757 "Functions with the interrupt attribute cannot have arguments!");
3758
3759 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
3761 CCInfo.getInRegsParamsCount() > 0);
3762
3763 unsigned CurArgIdx = 0;
3764 CCInfo.rewindByValRegsInfo();
3765
3766 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3767 CCValAssign &VA = ArgLocs[i];
3768 if (Ins[InsIdx].isOrigArg()) {
3769 std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
3770 CurArgIdx = Ins[InsIdx].getOrigArgIndex();
3771 }
3773 ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
3774 bool IsRegLoc = VA.isRegLoc();
3775
3776 if (Flags.isByVal()) {
3777 assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
3778 unsigned FirstByValReg, LastByValReg;
3779 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3780 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3781
3783 "ByVal args of size 0 should have been ignored by front-end.");
3784 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3785 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
3786 FirstByValReg, LastByValReg, VA, CCInfo);
3787 CCInfo.nextInRegsParam();
3788 continue;
3789 }
3790
3791
3792 if (IsRegLoc) {
3795 const TargetRegisterClass *RC = getRegClassFor(RegVT);
3796
3797
3798
3801
3802 ArgValue =
3804
3805
3806
3807 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3808 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
3809 (RegVT == MVT::f64 && ValVT == MVT::i64))
3811 else if (ABI.IsO32() && RegVT == MVT::i32 &&
3812 ValVT == MVT::f64) {
3813 assert(VA.needsCustom() && "Expected custom argument for f64 split");
3814 CCValAssign &NextVA = ArgLocs[++i];
3815 unsigned Reg2 =
3820 ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
3821 ArgValue, ArgValue2);
3822 }
3823
3825 } else {
3827
3828 assert(!VA.needsCustom() && "unexpected custom memory argument");
3829
3830
3832
3833
3836
3837
3840 LocVT, DL, Chain, FIN,
3842 OutChains.push_back(ArgValue.getValue(1));
3843
3844 ArgValue =
3846
3848 }
3849 }
3850
3851 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3852
3853 if (ArgLocs[i].needsCustom()) {
3854 ++i;
3855 continue;
3856 }
3857
3858
3859
3860
3861 if (Ins[InsIdx].Flags.isSRet()) {
3863 if (!Reg) {
3867 }
3870 break;
3871 }
3872 }
3873
3874 if (IsVarArg)
3875 writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
3876
3877
3878
3879 if (!OutChains.empty()) {
3880 OutChains.push_back(Chain);
3882 }
3883
3884 return Chain;
3885}
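// Return value lowering. CanLowerReturn checks the outgoing values against
// RetCC_Mips; LowerReturn copies them into the assigned return registers and
// emits MipsISD::Ret (or ERet for "interrupt" handlers).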
3886
3887
3888
3889
3890
3891bool
3892MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3897 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3898 return CCInfo.CheckReturn(Outs, RetCC_Mips);
3899}
3900
3901bool MipsTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
3902 bool IsSigned) const {
3904 return true;
3905
3906 return IsSigned;
3907}
3908
3914 MipsFunctionInfo *MipsFI = MF.getInfo();
3915
3917
3918 return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
3919}
3920
3923 bool IsVarArg,
3927
3928
3931
3932
3933 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3934
3935
3936 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
3937
3940
3941
3942 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3943 SDValue Val = OutVals[i];
3944 CCValAssign &VA = RVLocs[i];
3945 assert(VA.isRegLoc() && "Can only return in registers!");
3946 bool UseUpperBits = false;
3947
3949 default:
3952 break;
3955 break;
3957 UseUpperBits = true;
3958 [[fallthrough]];
3961 break;
3963 UseUpperBits = true;
3964 [[fallthrough]];
3967 break;
3969 UseUpperBits = true;
3970 [[fallthrough]];
3973 break;
3974 }
3975
3976 if (UseUpperBits) {
3977 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
3982 }
3983
3985
3986
3989 }
3990
3991
3992
3993
3994
3996 MipsFunctionInfo *MipsFI = MF.getInfo();
3998
3999 if (!Reg)
4000 llvm_unreachable("sret virtual register not created in the entry block");
4003 unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
4004
4008 }
4009
4010 RetOps[0] = Chain;
4011
4012
4015
4016
4018 return LowerInterruptReturn(RetOps, DL, DAG);
4019
4020
4021 return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
4022}
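// Inline assembly support. The MIPS-specific constraint letters follow GCC's
// config/mips/constraints.md: 'd' and 'y' are general registers, 'f' an FPU or
// MSA register, 'c' the $25 call register, 'l' the lo register, 'x' the hi/lo
// pair, 'R' a memory operand, and 'ZC' a memory operand whose offset is valid
// for ll/sc-style addressing.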
4023
4024
4025
4026
4027
4028
4029
4031MipsTargetLowering::getConstraintType(StringRef Constraint) const {
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043 if (Constraint.size() == 1) {
4044 switch (Constraint[0]) {
4045 default : break;
4046 case 'd':
4047 case 'y':
4048 case 'f':
4049 case 'c':
4050 case 'l':
4051 case 'x':
4053 case 'R':
4055 }
4056 }
4057
4058 if (Constraint == "ZC")
4060
4062}
4063
4064
4065
4066
4068MipsTargetLowering::getSingleConstraintMatchWeight(
4069 AsmOperandInfo &info, const char *constraint) const {
4071 Value *CallOperandVal = info.CallOperandVal;
4072
4073
4074 if (!CallOperandVal)
4077
4078 switch (*constraint) {
4079 default:
4081 break;
4082 case 'd':
4083 case 'y':
4086 break;
4087 case 'f':
4093 break;
4094 case 'c':
4095 case 'l':
4096 case 'x':
4099 break;
4100 case 'I':
4101 case 'J':
4102 case 'K':
4103 case 'L':
4104 case 'N':
4105 case 'O':
4106 case 'P':
4109 break;
4110 case 'R':
4112 break;
4113 }
4114 return weight;
4115}
4116
4117
4118
4119
4120
4122 unsigned long long &Reg) {
4123 if (C.front() != '{' || C.back() != '}')
4124 return std::make_pair(false, false);
4125
4126
4128 I = std::find_if(B, E, isdigit);
4129
4131
4132
4134 return std::make_pair(true, false);
4135
4136
4138 true);
4139}
4140
4145 return VT.bitsLT(MinVT) ? MinVT : VT;
4146}
4147
4148std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
4149parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
4154 unsigned long long Reg;
4155
4157
4158 if (!R.first)
4159 return std::make_pair(0U, nullptr);
4160
4161 if ((Prefix == "hi" || Prefix == "lo")) {
4162
4163 if (R.second)
4164 return std::make_pair(0U, nullptr);
4165
4166 RC = TRI->getRegClass(Prefix == "hi" ?
4167 Mips::HI32RegClassID : Mips::LO32RegClassID);
4168 return std::make_pair(*(RC->begin()), RC);
4169 } else if (Prefix.starts_with("$msa")) {
4170
4171
4172
4173 if (R.second)
4174 return std::make_pair(0U, nullptr);
4175
4177 .Case("$msair", Mips::MSAIR)
4178 .Case("$msacsr", Mips::MSACSR)
4179 .Case("$msaaccess", Mips::MSAAccess)
4180 .Case("$msasave", Mips::MSASave)
4181 .Case("$msamodify", Mips::MSAModify)
4182 .Case("$msarequest", Mips::MSARequest)
4183 .Case("$msamap", Mips::MSAMap)
4184 .Case("$msaunmap", Mips::MSAUnmap)
4186
4187 if (!Reg)
4188 return std::make_pair(0U, nullptr);
4189
4190 RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
4191 return std::make_pair(Reg, RC);
4192 }
4193
4194 if (!R.second)
4195 return std::make_pair(0U, nullptr);
4196
4197 if (Prefix == "$f") {
4198
4199
4200
4201
4202 if (VT == MVT::Other) {
4204 VT = MVT::f32;
4205 else
4206 VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
4207 }
4208
4210
4211 if (RC == &Mips::AFGR64RegClass) {
4213 Reg >>= 1;
4214 }
4215 } else if (Prefix == "$fcc")
4216 RC = TRI->getRegClass(Mips::FCCRegClassID);
4217 else if (Prefix == "$w") {
4218 RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
4219 } else {
4220 assert(Prefix == "$");
4221 RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
4222 }
4223
4225 return std::make_pair(*(RC->begin() + Reg), RC);
4226}
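// getRegForInlineAsmConstraint: map a single-letter register constraint (or a
// full register name such as "$f0", "$w0", "$fcc0", "$msair") to a register
// class, falling back to parseRegForInlineAsmConstraint for explicit names.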
4227
4228
4229
4230
4231std::pair<unsigned, const TargetRegisterClass *>
4234 MVT VT) const {
4235 if (Constraint.size() == 1) {
4236 switch (Constraint[0]) {
4237 case 'd':
4238 case 'y':
4239 case 'r':
4240 if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||
4241 VT == MVT::i1) ||
4242 (VT == MVT::f32 && Subtarget.useSoftFloat())) {
4244 return std::make_pair(0U, &Mips::CPU16RegsRegClass);
4245 return std::make_pair(0U, &Mips::GPR32RegClass);
4246 }
4247 if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat()) ||
4248 (VT == MVT::f64 && Subtarget.isSingleFloat())) &&
4250 return std::make_pair(0U, &Mips::GPR32RegClass);
4251 if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat()) ||
4252 (VT == MVT::f64 && Subtarget.isSingleFloat())) &&
4254 return std::make_pair(0U, &Mips::GPR64RegClass);
4255
4256 return std::make_pair(0U, nullptr);
4257 case 'f':
4258 if (VT == MVT::v16i8)
4259 return std::make_pair(0U, &Mips::MSA128BRegClass);
4260 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
4261 return std::make_pair(0U, &Mips::MSA128HRegClass);
4262 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
4263 return std::make_pair(0U, &Mips::MSA128WRegClass);
4264 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
4265 return std::make_pair(0U, &Mips::MSA128DRegClass);
4266 else if (VT == MVT::f32)
4267 return std::make_pair(0U, &Mips::FGR32RegClass);
4268 else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
4270 return std::make_pair(0U, &Mips::FGR64RegClass);
4271 return std::make_pair(0U, &Mips::AFGR64RegClass);
4272 }
4273 break;
4274 case 'c':
4275 if (VT == MVT::i32)
4276 return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
4277 if (VT == MVT::i64)
4278 return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
4279
4280 return std::make_pair(0U, nullptr);
4281 case 'l':
4282
4283 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
4284 return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
4285 return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
4286 case 'x':
4287
4288
4289
4290 return std::make_pair(0U, nullptr);
4291 }
4292 }
4293
4294 if (!Constraint.empty()) {
4295 std::pair<unsigned, const TargetRegisterClass *> R;
4296 R = parseRegForInlineAsmConstraint(Constraint, VT);
4297
4298 if (R.second)
4299 return R;
4300 }
4301
4303}
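// LowerAsmOperandForConstraint: fold constant operands that satisfy the
// immediate constraints (I, J, K, L, N, O, P) into target constants; anything
// else is passed on to the generic implementation.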
4304
4305
4306
4307void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4309 std::vector &Ops,
4313
4314
4315 if (Constraint.size() > 1)
4316 return;
4317
4318 char ConstraintLetter = Constraint[0];
4319 switch (ConstraintLetter) {
4320 default: break;
4321 case 'I':
4322
4324 EVT Type = Op.getValueType();
4325 int64_t Val = C->getSExtValue();
4328 break;
4329 }
4330 }
4331 return;
4332 case 'J':
4334 EVT Type = Op.getValueType();
4335 int64_t Val = C->getZExtValue();
4336 if (Val == 0) {
4338 break;
4339 }
4340 }
4341 return;
4342 case 'K':
4344 EVT Type = Op.getValueType();
4345 uint64_t Val = C->getZExtValue();
4348 break;
4349 }
4350 }
4351 return;
4352 case 'L':
4354 EVT Type = Op.getValueType();
4355 int64_t Val = C->getSExtValue();
4356 if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
4358 break;
4359 }
4360 }
4361 return;
4362 case 'N':
4364 EVT Type = Op.getValueType();
4365 int64_t Val = C->getSExtValue();
4366 if ((Val >= -65535) && (Val <= -1)) {
4368 break;
4369 }
4370 }
4371 return;
4372 case 'O':
4374 EVT Type = Op.getValueType();
4375 int64_t Val = C->getSExtValue();
4378 break;
4379 }
4380 }
4381 return;
4382 case 'P':
4384 EVT Type = Op.getValueType();
4385 int64_t Val = C->getSExtValue();
4386 if ((Val <= 65535) && (Val >= 1)) {
4388 break;
4389 }
4390 }
4391 return;
4392 }
4393
4394 if (Result.getNode()) {
4395 Ops.push_back(Result);
4396 return;
4397 }
4398
4400}
4401
4402bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
4404 unsigned AS,
4406
4407 if (AM.BaseGV)
4408 return false;
4409
4410 switch (AM.Scale) {
4411 case 0:
4412 break;
4413 case 1:
4414 if (!AM.HasBaseReg)
4415 break;
4416 return false;
4417 default:
4418 return false;
4419 }
4420
4421 return true;
4422}
4423
4424bool
4425MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4426
4427 return false;
4428}
4429
4430EVT MipsTargetLowering::getOptimalMemOpType(
4432 const AttributeList &FuncAttributes) const {
4433 if (Subtarget.hasMips64())
4434 return MVT::i64;
4435
4436 return MVT::i32;
4437}
4438
4439bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4440 bool ForCodeSize) const {
4441 if (VT != MVT::f32 && VT != MVT::f64)
4442 return false;
4443 if (Imm.isNegZero())
4444 return false;
4445 return Imm.isZero();
4446}
4447
4448bool MipsTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
4449 return isInt<16>(Imm);
4450}
4451
4452bool MipsTargetLowering::isLegalAddImmediate(int64_t Imm) const {
4453 return isInt<16>(Imm);
4454}
4455
4459 if (ABI.IsN64())
4462}
4463
4464SDValue MipsTargetLowering::getPICJumpTableRelocBase(SDValue Table,
4465 SelectionDAG &DAG) const {
4467 return Table;
4469}
4470
4471bool MipsTargetLowering::useSoftFloat() const {
4472 return Subtarget.useSoftFloat();
4473}
4474
4475void MipsTargetLowering::copyByValRegs(
4476 SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
4479 unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
4483 unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
4484 unsigned NumRegs = LastReg - FirstReg;
4485 unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
4486 unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
4487 int FrameObjOffset;
4489
4490 if (RegAreaSize)
4491 FrameObjOffset =
4492 (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
4493 (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
4494 else
4496
4497
4499
4500
4501
4502
4503
4504 int FI = MFI.CreateFixedObject(FrameObjSize, FrameObjOffset, false, true);
4507
4508 if (!NumRegs)
4509 return;
4510
4511
4513 const TargetRegisterClass *RC = getRegClassFor(RegTy);
4514
4515 for (unsigned I = 0; I < NumRegs; ++I) {
4516 unsigned ArgReg = ByValArgRegs[FirstReg + I];
4517 unsigned VReg = addLiveIn(MF, ArgReg, RC);
4518 unsigned Offset = I * GPRSizeInBytes;
4522 StorePtr, MachinePointerInfo(FuncArg, Offset));
4523 OutChains.push_back(Store);
4524 }
4525}
4526
4527
4528void MipsTargetLowering::passByValArg(
4530 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
4533 unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
4535 unsigned ByValSizeInBytes = Flags.getByValSize();
4536 unsigned OffsetInBytes = 0;
4537 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4538 Align Alignment =
4539 std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
4542 unsigned NumRegs = LastReg - FirstReg;
4543
4544 if (NumRegs) {
4546 bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
4547 unsigned I = 0;
4548
4549
4550 for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
4554 MachinePointerInfo(), Alignment);
4556 unsigned ArgReg = ArgRegs[FirstReg + I];
4557 RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
4558 }
4559
4560
4561 if (ByValSizeInBytes == OffsetInBytes)
4562 return;
4563
4564
4565 if (LeftoverBytes) {
4567
4568 for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
4569 OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
4570 unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
4571
4572 if (RemainingSizeInBytes < LoadSizeInBytes)
4573 continue;
4574
4575
4578 PtrTy));
4580 ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
4583
4584
4585 unsigned Shamt;
4586
4587 if (isLittle)
4588 Shamt = TotalBytesLoaded * 8;
4589 else
4590 Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
4591
4594
4597 else
4598 Val = Shift;
4599
4600 OffsetInBytes += LoadSizeInBytes;
4601 TotalBytesLoaded += LoadSizeInBytes;
4602 Alignment = std::min(Alignment, Align(LoadSizeInBytes));
4603 }
4604
4605 unsigned ArgReg = ArgRegs[FirstReg + I];
4606 RegsToPass.push_back(std::make_pair(ArgReg, Val));
4607 return;
4608 }
4609 }
4610
4611
4612 unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
4618 Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, DL, PtrTy),
4619 Align(Alignment), false, false,
4620 nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo());
4622}
4623
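
Editor's note: a sketch of what this splitting means at the source level, assuming the O32 ABI (illustrative only, not part of this file): the first 16 bytes of outgoing arguments travel in $a0-$a3, so a by-value aggregate is chopped into register-sized loads for the part that falls in that window and a memcpy into the outgoing argument area for the rest.

// Hypothetical C++ caller: under O32, roughly the first 16 bytes of 's' are
// passed in $a0..$a3 by the register loop above; the remaining bytes go
// through the DAG.getMemcpy() fallback at the end of passByValArg.
struct BigByVal { int v[6]; };               // 24 bytes
void callee(BigByVal s);                     // aggregate passed by value
void caller(const BigByVal &src) { callee(src); }
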
4624void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
4630 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4632 const TargetRegisterClass *RC = getRegClassFor(RegTy);
4635 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
4636
4637
4638 int VaArgOffset;
4639
4640 if (ArgRegs.size() == Idx)
4641 VaArgOffset = alignTo(State.getStackSize(), RegSizeInBytes);
4642 else {
4643 VaArgOffset =
4644 (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
4645 (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
4646 }
4647
4648
4649
4650 int FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
4652
4653
4654
4655
4656
4657 for (unsigned I = Idx; I < ArgRegs.size();
4658 ++I, VaArgOffset += RegSizeInBytes) {
4664 DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
4665 cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
4666 (Value *)nullptr);
4667 OutChains.push_back(Store);
4668 }
4669}
4670
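
Editor's note: for context, an assumed example (not part of this file) of the code this spill supports: storing the remaining argument registers next to the caller-allocated area lets va_arg walk every argument, named or not, with simple pointer arithmetic.

// Hypothetical variadic callee: under O32 the unnamed arguments that arrived
// in registers are written to the stack by the loop above, so va_arg() can
// read them all from contiguous memory.
#include <cstdarg>
static int sum_ints(int n, ...) {
  va_list ap;
  va_start(ap, n);
  int s = 0;
  for (int i = 0; i < n; ++i)
    s += va_arg(ap, int);
  va_end(ap);
  return s;
}
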
4671void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
4672 Align Alignment) const {
4673 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
4674
4675 assert(Size && "Byval argument's size shouldn't be 0.");
4676
4677 Alignment = std::min(Alignment, TFL->getStackAlign());
4678
4679 unsigned FirstReg = 0;
4680 unsigned NumRegs = 0;
4681
4683 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4685
4688
4689
4690
4691 assert(
4692 Alignment >= Align(RegSizeInBytes) &&
4693 "Byval argument's alignment should be a multiple of RegSizeInBytes.");
4694
4695 FirstReg = State->getFirstUnallocated(IntArgRegs);
4696
4697
4698
4699
4700
4701 if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
4702 State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
4703 ++FirstReg;
4704 }
4705
4706
4708 for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
4709 Size -= RegSizeInBytes, ++I, ++NumRegs)
4710 State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
4711 }
4712
4713 State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
4714}
4715
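
Editor's note: a hedged O32 example (assumed, not part of this file) of the even-register padding step above: when the first free register is odd and the by-value argument needs 8-byte alignment, one register is burned as padding so the aggregate starts in an aligned register pair.

// Hypothetical signature: 'a' occupies $a0; the 8-byte-aligned struct must
// start in an even register, so $a1 is allocated as padding and the first
// 8 bytes of 's' are assigned to the $a2/$a3 pair, the rest stays in memory.
struct Align8 { long long x; long long y; }; // 16 bytes, 8-byte aligned
void callee(int a, Align8 s);
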
4716MachineBasicBlock *
4717MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI, MachineBasicBlock *BB,
4718 bool isFPCmp,
4719 unsigned Opc) const {
4721 "Subtarget already supports SELECT nodes with the use of"
4722 "conditional-move instructions.");
4723
4727
4728
4729
4730
4731
4734
4735
4736
4737
4738
4739
4740
4745 F->insert(It, copy0MBB);
4746 F->insert(It, sinkMBB);
4747
4748
4752
4753
4756
4757 if (isFPCmp) {
4758
4760 .addReg(MI.getOperand(1).getReg())
4762 } else {
4763
4765 .addReg(MI.getOperand(1).getReg())
4768 }
4769
4770
4771
4772
4773 BB = copy0MBB;
4774
4775
4777
4778
4779
4780
4781 BB = sinkMBB;
4782
4783 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
4784 .addReg(MI.getOperand(2).getReg())
4786 .addReg(MI.getOperand(3).getReg())
4788
4789 MI.eraseFromParent();
4790
4791 return BB;
4792}
4793
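
Editor's note: a hedged source-level illustration (assumed, not part of this file) of when the expansion above fires: on a subtarget without MIPS-IV/MIPS32 conditional moves, a select fed by a floating-point compare cannot use movt/movf and is rewritten into the branch diamond built by emitPseudoSELECT.

// Hypothetical input: the 'select' below becomes a PseudoSELECT_* node on such
// subtargets and is expanded into thisMBB -> copy0MBB -> sinkMBB with a PHI.
static double fmin_demo(double a, double b) { return a < b ? a : b; }
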
4794MachineBasicBlock *
4795MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
4796 MachineBasicBlock *BB) const {
4797 assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
4798 "Subtarget already supports SELECT nodes with the use of"
4799 "conditional-move instructions.");
4800
4801 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4803
4804
4805
4806
4807
4808
4811
4812
4813
4814
4815
4816
4817
4818 MachineBasicBlock *thisMBB = BB;
4819 MachineFunction *F = BB->getParent();
4820 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
4821 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
4823 F->insert(It, sinkMBB);
4824
4825
4829
4830
4833
4834
4836 .addReg(MI.getOperand(2).getReg())
4839
4840
4841
4842
4843 BB = copy0MBB;
4844
4845
4847
4848
4849
4850
4851 BB = sinkMBB;
4852
4853
4854 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
4855 .addReg(MI.getOperand(3).getReg())
4857 .addReg(MI.getOperand(5).getReg())
4859 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg())
4860 .addReg(MI.getOperand(4).getReg())
4862 .addReg(MI.getOperand(6).getReg())
4864
4865 MI.eraseFromParent();
4866
4867 return BB;
4868}
4869
4870
4871int MipsTargetLowering::getCPURegisterIndex(StringRef Name) const {
4872 int CC;
4873
4874 CC = StringSwitch<int>(Name)
4875 .Case("zero", 0)
4876 .Case("at", 1)
4877 .Case("AT", 1)
4878 .Case("a0", 4)
4879 .Case("a1", 5)
4880 .Case("a2", 6)
4881 .Case("a3", 7)
4882 .Case("v0", 2)
4883 .Case("v1", 3)
4884 .Case("s0", 16)
4885 .Case("s1", 17)
4886 .Case("s2", 18)
4887 .Case("s3", 19)
4888 .Case("s4", 20)
4889 .Case("s5", 21)
4890 .Case("s6", 22)
4891 .Case("s7", 23)
4892 .Case("k0", 26)
4893 .Case("k1", 27)
4894 .Case("gp", 28)
4895 .Case("sp", 29)
4896 .Case("fp", 30)
4897 .Case("s8", 30)
4898 .Case("ra", 31)
4899 .Case("t0", 8)
4900 .Case("t1", 9)
4901 .Case("t2", 10)
4902 .Case("t3", 11)
4903 .Case("t4", 12)
4904 .Case("t5", 13)
4905 .Case("t6", 14)
4906 .Case("t7", 15)
4907 .Case("t8", 24)
4908 .Case("t9", 25)
4909 .Default(-1);
4910
4911 if (!(ABI.IsN32() || ABI.IsN64()))
4912 return CC;
4913
4914
4915
4916
4917 if (8 <= CC && CC <= 11)
4918 CC += 4;
4919
4920 if (CC == -1)
4921 CC = StringSwitch<int>(Name)
4922 .Case("a4", 8)
4923 .Case("a5", 9)
4924 .Case("a6", 10)
4925 .Case("a7", 11)
4926 .Case("kt0", 26)
4927 .Case("kt1", 27)
4928 .Default(-1);
4929
4930 return CC;
4931}
4932
4933
4934
4936Register MipsTargetLowering::getRegisterByName(const char *RegName, LLT VT,
4937 const MachineFunction &MF) const {
4938
4939 std::string newRegName = RegName;
4942
4943
4944 std::smatch matchResult;
4945 int regIdx;
4946 static const std::regex matchStr("^[0-9]*$");
4947 if (std::regex_match(newRegName, matchResult, matchStr))
4948 regIdx = std::stoi(newRegName);
4949 else {
4951 regIdx = getCPURegisterIndex(StringRef(newRegName));
4952 }
4953
4954
4955 if (regIdx >= 0 && regIdx < 32) {
4958 ? MRI->getRegClass(Mips::GPR64RegClassID)
4959 : MRI->getRegClass(Mips::GPR32RegClassID);
4961 }
4962
4965}
4966
4974
4975 Register Dest = MI.getOperand(0).getReg();
4976 Register Address = MI.getOperand(1).getReg();
4977 unsigned Imm = MI.getOperand(2).getImm();
4978
4980
4982
4983 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4989 } else {
4990
4991
4992 Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4993 Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4994 Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4999 .addImm(Imm + (IsLittle ? 0 : 3))
5004 .addImm(Imm + (IsLittle ? 3 : 0))
5007 }
5008
5009 MI.eraseFromParent();
5010 return BB;
5011}
5012
5015 MachineFunction *MF = BB->getParent();
5017 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
5018 const bool IsLittle = Subtarget.isLittle();
5020
5021 Register Dest = MI.getOperand(0).getReg();
5022 Register Address = MI.getOperand(1).getReg();
5023 unsigned Imm = MI.getOperand(2).getImm();
5024
5026
5028
5030 Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
5036 } else {
5037 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
5038 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5039 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5043 .addImm(Imm + (IsLittle ? 0 : 4));
5047 .addImm(Imm + (IsLittle ? 4 : 0));
5049 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
5053 }
5054 } else {
5055
5056
5057 Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5058 Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5059 Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5060 Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5061 Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5062 Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5063 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
5068 .addImm(Imm + (IsLittle ? 0 : 7))
5073 .addImm(Imm + (IsLittle ? 3 : 4))
5079 .addImm(Imm + (IsLittle ? 4 : 3))
5084 .addImm(Imm + (IsLittle ? 7 : 0))
5087 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
5091 }
5092
5093 MI.eraseFromParent();
5094 return BB;
5095}
5096
5099 MachineFunction *MF = BB->getParent();
5101 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
5102 const bool IsLittle = Subtarget.isLittle();
5104
5105 Register StoreVal = MI.getOperand(0).getReg();
5106 Register Address = MI.getOperand(1).getReg();
5107 unsigned Imm = MI.getOperand(2).getImm();
5108
5110
5112
5113 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
5114 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5124 } else {
5125
5126
5127 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5135 .addImm(Imm + (IsLittle ? 0 : 3));
5139 .addImm(Imm + (IsLittle ? 3 : 0));
5140 }
5141
5142 MI.eraseFromParent();
5143
5144 return BB;
5145}
5146
5149 MachineFunction *MF = BB->getParent();
5151 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
5152 const bool IsLittle = Subtarget.isLittle();
5154
5155 Register StoreVal = MI.getOperand(0).getReg();
5156 Register Address = MI.getOperand(1).getReg();
5157 unsigned Imm = MI.getOperand(2).getImm();
5158
5160
5162
5164 Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
5165 Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
5177 } else {
5178 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
5179 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5180 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5195 .addImm(Imm + (IsLittle ? 0 : 4));
5199 .addImm(Imm + (IsLittle ? 4 : 0));
5200 }
5201 } else {
5202
5203
5204 Register Bitcast = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
5205 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5206 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5219 .addImm(Imm + (IsLittle ? 0 : 3));
5223 .addImm(Imm + (IsLittle ? 3 : 0));
5227 .addImm(Imm + (IsLittle ? 4 : 7));
5231 .addImm(Imm + (IsLittle ? 7 : 4));
5232 }
5233
5234 MI.eraseFromParent();
5235 return BB;
5236}