LLVM: lib/Target/Mips/MipsISelLowering.cpp Source File


69#include

70#include

71#include

72#include

73#include

74#include

75#include

76#include

77

78using namespace llvm;

79

80#define DEBUG_TYPE "mips-lower"

81

82STATISTIC(NumTailCalls, "Number of tail calls");

83

86 cl::desc("MIPS: Don't trap on integer division by zero."),

88

90

92 Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,

93 Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64

94};

95

96

97

100 EVT VT) const {

103

106 : MVT::i64;

108}

109

112 EVT VT) const {

118 }

120}

121

124 unsigned &NumIntermediates, MVT &RegisterVT) const {

127 RegisterVT = IntermediateVT.getSimpleVT();

129 return NumIntermediates;

130 }

134 return NumIntermediates * getNumRegisters(Context, IntermediateVT);

135}

136

141}

142

145 unsigned Flag) const {

147}

148

151 unsigned Flag) const {

153}

154

157 unsigned Flag) const {

159}

160

163 unsigned Flag) const {

165}

166

169 unsigned Flag) const {

171 N->getOffset(), Flag);

172}

173

292 }

293 return nullptr;

294}

295

298 : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {

299

300

303

304

308

309

314 }

315

316

317

321 }

322

323

328 }

329

332

334

335

336

337

338

340

341

359

360

372 } else {

375 }

376

387 } else {

390 }

395 }

396

401 }

402

406

415

416

433 } else {

436 }

443

446

449

466

467

472

474

479

480

483

487 }

488

492 }

493

494

499

504

515 }

516

518

521

524 else

526

528

529

530

533

535

537

539}

540

546

548}

549

550

556

557

558 bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&

561

562

563

564 if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||

566 UseFastISel = false;

567

569}

570

572 EVT VT) const {

574 return MVT::i32;

576}

577
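// Combine for integer divide/remainder: the node is rewritten into a MIPS
// divide that defines the LO/HI register pair, and the quotient (LO) and
// remainder (HI) are copied back out only for the result values that
// actually have uses (hasAnyUseOfValue below).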

583

584 EVT Ty = N->getValueType(0);

585 unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;

586 unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;

590

592 N->getOperand(0), N->getOperand(1));

595

596

597 if (N->hasAnyUseOfValue(0)) {

599 InGlue);

601 InChain = CopyFromLo.getValue(1);

602 InGlue = CopyFromLo.getValue(2);

603 }

604

605

606 if (N->hasAnyUseOfValue(1)) {

608 HI, Ty, InGlue);

610 }

611

613}

614

616 switch (CC) {

638 }

639}

640

641

642

645 return false;

646

648 "Illegal Condition Code");

649

650 return true;

651}

652

653

654

656

658 return Op;

659

661

662 if (LHS.getValueType().isFloatingPoint())

663 return Op;

664

667

668

669

670 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

671

674}

675

676

682

685}

686

692

693 SDValue SetCC = N->getOperand(0);

694

698

699 SDValue False = N->getOperand(2);

701

704

705 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);

706

707

708

709

710

711

712

713

714 if (!FalseC)

716

718

721 SDValue True = N->getOperand(1);

722

726

728 }

729

730

731

732 SDValue True = N->getOperand(1);

733 ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);

734

737

738

739

740

743

745

746

747

748

749 if (Diff == 1)

751

752

753

754

755

756 if (Diff == -1) {

762 }

763

764

766}

767

773

774 SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);

775

776 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);

779

780

781

782

783

784

785

786

789

790 SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);

791 return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),

792 ValueIfFalse, FCC, ValueIfTrue, Glue);

793}

794
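// AND combine: when the mask operand is a shifted run of ones (SMPos/SMSize),
// (and (srl/sra x, pos), mask) can be folded into a single bit-field extract,
// and on targets with the Octeon extensions (hasCnMips) an
// (and (shl x, pos), mask) form is recognized as well.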

800

801 SDValue FirstOperand = N->getOperand(0);

802 unsigned FirstOperandOpc = FirstOperand.getOpcode();

803 SDValue Mask = N->getOperand(1);

804 EVT ValTy = N->getValueType(0);

806

808 unsigned SMPos, SMSize;

811 unsigned Opc;

812

813

814 if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||

817

818 if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {

819

820

821

822

823

824 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))

826

828

829

830

831 if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())

833

835 NewOperand = FirstOperand.getOperand(0);

836 } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {

837

838

839

840

841

842

843

844 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))

846

848

849 if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||

852

853 NewOperand = FirstOperand.getOperand(0);

854

855 SMSize--;

857 } else {

858

859

860

861

862

865

866

867 if (SMPos)

869

871 NewOperand = FirstOperand;

872 }

873 return DAG.getNode(Opc, DL, ValTy, NewOperand,

876}

877
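// OR combine: recognizes the idioms produced for bit-field inserts, either
// two AND/SHL operands whose masks and shift amounts line up (SMPos0/SMSize0
// against SMPos1/SMSize1) or an AND with an inverted field mask, and folds
// them into a single insert of one value into the other.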

883

884 SDValue FirstOperand = N->getOperand(0), SecondOperand = N->getOperand(1);

885 unsigned SMPos0, SMSize0, SMPos1, SMSize1;

887

889 SecondOperand.getOpcode() == ISD::SHL) ||

891 SecondOperand.getOpcode() == ISD::AND)) {

892

893

894

895

896

897

902 ? SecondOperand.getOperand(0)

907 if (!(CN = dyn_cast<ConstantSDNode>(AndMask)) ||

910

912 ? SecondOperand.getOperand(1)

914 if (!(CN = dyn_cast<ConstantSDNode>(ShlShift)))

917

918 if (SMPos0 != 0 || SMSize0 != ShlShiftValue)

920

922 EVT ValTy = N->getValueType(0);

923 SMPos1 = ShlShiftValue;

925 SMSize1 = (ValTy == MVT::i64 ? 64 : 32) - SMPos1;

928 DAG.getConstant(SMSize1, DL, MVT::i32), AndOperand0);

929 }

930

931

934

935

936

937

938

939 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||

942

943

944 if (SecondOperand.getOpcode() == ISD::AND &&

945 SecondOperand.getOperand(0).getOpcode() == ISD::SHL) {

946

947 if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand.getOperand(1))) ||

950

951

952 if (SMPos0 != SMPos1 || SMSize0 != SMSize1)

954

956

957 if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))

959

961

962

963

964 EVT ValTy = N->getValueType(0);

965 if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))

967

973 } else {

974

975

976

977

978 if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&

979 ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||

980 (SMSize0 + SMPos0 <= 32))) {

981

983 if (SecondOperand.getOpcode() == ISD::AND) {

984 if (!(CN1 = dyn_cast<ConstantSDNode>(SecondOperand->getOperand(1))))

986 } else {

987 if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))

989 }

990

991

994

996 EVT ValTy = N->getOperand(0)->getValueType(0);

999 if (!isConstCase) {

1001 SrlX = DAG.getNode(ISD::SRL, DL, SecondOperand->getValueType(0),

1002 SecondOperand, Const1);

1003 }

1006 isConstCase

1008 : SrlX,

1011 : SMSize0,

1012 DL, MVT::i32),

1014 }

1016 }

1017}

1018
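// Multiply-accumulate combine: an i64 add/sub whose multiply operand has a
// single use and whose factors are sign- or zero-extended from i32 is turned
// into a multiply-add/sub over the LO/HI pair; SplitScalar breaks the 64-bit
// addend into the two 32-bit halves that feed the new node.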

1021

1022

1026

1027

1028

1029

1030

1034

1035

1038

1039

1040

1041

1042

1043

1044

1045

1046

1047

1048

1049

1050

1051

1052

1053

1054

1055

1056

1057

1060

1064

1068

1069

1070

1071 if (!Mult.hasOneUse())

1073

1074

1075

1076

1077

1078

1079 SDValue MultLHS = Mult->getOperand(0);

1080 SDValue MultRHS = Mult->getOperand(1);

1081

1086

1087 if (!IsSigned && !IsUnsigned)

1089

1090

1092 SDValue BottomHalf, TopHalf;

1093 std::tie(BottomHalf, TopHalf) =

1094 CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);

1097

1098

1105 SDValue MAdd = CurDAG.getNode(Opcode, DL, MVT::Untyped, MAddOps);

1106

1111 return Combined;

1112}

1113

1117

1120 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)

1122

1124 }

1125

1127}

1128

1132

1135 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)

1137

1139 }

1140

1141

1143

1146

1148

1152

1153 EVT ValTy = N->getValueType(0);

1155

1157 Add.getOperand(0));

1159}

1160

1164

1165

1166

1167

1170

1171 SDValue FirstOperand = N->getOperand(0);

1172 unsigned FirstOperandOpc = FirstOperand.getOpcode();

1173 SDValue SecondOperand = N->getOperand(1);

1174 EVT ValTy = N->getValueType(0);

1176

1178 unsigned SMPos, SMSize;

1181

1182

1183 if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))

1185

1187

1190

1191 if (FirstOperandOpc != ISD::AND)

1193

1194

1195 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||

1198

1199

1200

1201 if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())

1203

1204 NewOperand = FirstOperand.getOperand(0);

1205

1206 SMSize--;

1207

1211}

1212

1214 const {

1216 unsigned Opc = N->getOpcode();

1217

1218 switch (Opc) {

1219 default: break;

1238 }

1239

1241}

1242

1245}

1246

1249}

1250

1252

1253

1254

1255 if (auto *C = dyn_cast<ConstantSDNode>(Y))

1256 return C->getAPIntValue().ule(15);

1257

1258 return false;

1259}

1260

1264 N->getOperand(0).getOpcode() == ISD::SRL) ||

1266 N->getOperand(0).getOpcode() == ISD::SHL)) &&

1267 "Expected shift-shift mask");

1268

1269 if (N->getOperand(0).getValueType().isVector())

1270 return false;

1271 return true;

1272}

1273

1274void

1279}

1280

1283{

1284 switch (Op.getOpcode())

1285 {

1293 case ISD::SETCC: return lowerSETCC(Op, DAG);

1295 case ISD::VAARG: return lowerVAARG(Op, DAG);

1297 case ISD::FABS: return lowerFABS(Op, DAG);

1299 return lowerFCANONICALIZE(Op, DAG);

1305 case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true);

1306 case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, false);

1311 }

1313}

1314

1315

1316

1317

1318

1319

1320

1321

1322static unsigned

1324{

1327 return VReg;

1328}

1329
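// Inserts a trap-if-equal (TEQ, or TEQ_MM for microMIPS) against $zero right
// after an integer divide so that division by zero traps at run time; the
// check is skipped when the "don't trap on integer division by zero" option
// above is set, and 64-bit divides compare the full 64-bit register.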

1333 bool Is64Bit, bool IsMicroMips) {

1335 return &MBB;

1336

1337

1341 MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),

1342 TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))

1346

1347

1348 if (Is64Bit)

1350

1351

1353

1354

1355

1356

1357 return &MBB;

1358}

1359
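// Custom inserter dispatch: each pseudo-instruction that cannot be selected
// directly is expanded here. Atomic read-modify-write and compare-and-swap
// pseudos go to the emitAtomic* helpers (with byte/halfword variants handled
// separately), divides get the divide-by-zero trap, the select pseudos become
// branch diamonds, and LDR_W / LDR_D / STR_W / STR_D get their own expansions.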

1363 switch (MI.getOpcode()) {

1364 default:

1366 case Mips::ATOMIC_LOAD_ADD_I8:

1367 return emitAtomicBinaryPartword(MI, BB, 1);

1368 case Mips::ATOMIC_LOAD_ADD_I16:

1369 return emitAtomicBinaryPartword(MI, BB, 2);

1370 case Mips::ATOMIC_LOAD_ADD_I32:

1371 return emitAtomicBinary(MI, BB);

1372 case Mips::ATOMIC_LOAD_ADD_I64:

1373 return emitAtomicBinary(MI, BB);

1374

1375 case Mips::ATOMIC_LOAD_AND_I8:

1376 return emitAtomicBinaryPartword(MI, BB, 1);

1377 case Mips::ATOMIC_LOAD_AND_I16:

1378 return emitAtomicBinaryPartword(MI, BB, 2);

1379 case Mips::ATOMIC_LOAD_AND_I32:

1380 return emitAtomicBinary(MI, BB);

1381 case Mips::ATOMIC_LOAD_AND_I64:

1382 return emitAtomicBinary(MI, BB);

1383

1384 case Mips::ATOMIC_LOAD_OR_I8:

1385 return emitAtomicBinaryPartword(MI, BB, 1);

1386 case Mips::ATOMIC_LOAD_OR_I16:

1387 return emitAtomicBinaryPartword(MI, BB, 2);

1388 case Mips::ATOMIC_LOAD_OR_I32:

1389 return emitAtomicBinary(MI, BB);

1390 case Mips::ATOMIC_LOAD_OR_I64:

1391 return emitAtomicBinary(MI, BB);

1392

1393 case Mips::ATOMIC_LOAD_XOR_I8:

1394 return emitAtomicBinaryPartword(MI, BB, 1);

1395 case Mips::ATOMIC_LOAD_XOR_I16:

1396 return emitAtomicBinaryPartword(MI, BB, 2);

1397 case Mips::ATOMIC_LOAD_XOR_I32:

1398 return emitAtomicBinary(MI, BB);

1399 case Mips::ATOMIC_LOAD_XOR_I64:

1400 return emitAtomicBinary(MI, BB);

1401

1402 case Mips::ATOMIC_LOAD_NAND_I8:

1403 return emitAtomicBinaryPartword(MI, BB, 1);

1404 case Mips::ATOMIC_LOAD_NAND_I16:

1405 return emitAtomicBinaryPartword(MI, BB, 2);

1406 case Mips::ATOMIC_LOAD_NAND_I32:

1407 return emitAtomicBinary(MI, BB);

1408 case Mips::ATOMIC_LOAD_NAND_I64:

1409 return emitAtomicBinary(MI, BB);

1410

1411 case Mips::ATOMIC_LOAD_SUB_I8:

1412 return emitAtomicBinaryPartword(MI, BB, 1);

1413 case Mips::ATOMIC_LOAD_SUB_I16:

1414 return emitAtomicBinaryPartword(MI, BB, 2);

1415 case Mips::ATOMIC_LOAD_SUB_I32:

1416 return emitAtomicBinary(MI, BB);

1417 case Mips::ATOMIC_LOAD_SUB_I64:

1418 return emitAtomicBinary(MI, BB);

1419

1420 case Mips::ATOMIC_SWAP_I8:

1421 return emitAtomicBinaryPartword(MI, BB, 1);

1422 case Mips::ATOMIC_SWAP_I16:

1423 return emitAtomicBinaryPartword(MI, BB, 2);

1424 case Mips::ATOMIC_SWAP_I32:

1425 return emitAtomicBinary(MI, BB);

1426 case Mips::ATOMIC_SWAP_I64:

1427 return emitAtomicBinary(MI, BB);

1428

1429 case Mips::ATOMIC_CMP_SWAP_I8:

1430 return emitAtomicCmpSwapPartword(MI, BB, 1);

1431 case Mips::ATOMIC_CMP_SWAP_I16:

1432 return emitAtomicCmpSwapPartword(MI, BB, 2);

1433 case Mips::ATOMIC_CMP_SWAP_I32:

1434 return emitAtomicCmpSwap(MI, BB);

1435 case Mips::ATOMIC_CMP_SWAP_I64:

1436 return emitAtomicCmpSwap(MI, BB);

1437

1438 case Mips::ATOMIC_LOAD_MIN_I8:

1439 return emitAtomicBinaryPartword(MI, BB, 1);

1440 case Mips::ATOMIC_LOAD_MIN_I16:

1441 return emitAtomicBinaryPartword(MI, BB, 2);

1442 case Mips::ATOMIC_LOAD_MIN_I32:

1443 return emitAtomicBinary(MI, BB);

1444 case Mips::ATOMIC_LOAD_MIN_I64:

1445 return emitAtomicBinary(MI, BB);

1446

1447 case Mips::ATOMIC_LOAD_MAX_I8:

1448 return emitAtomicBinaryPartword(MI, BB, 1);

1449 case Mips::ATOMIC_LOAD_MAX_I16:

1450 return emitAtomicBinaryPartword(MI, BB, 2);

1451 case Mips::ATOMIC_LOAD_MAX_I32:

1452 return emitAtomicBinary(MI, BB);

1453 case Mips::ATOMIC_LOAD_MAX_I64:

1454 return emitAtomicBinary(MI, BB);

1455

1456 case Mips::ATOMIC_LOAD_UMIN_I8:

1457 return emitAtomicBinaryPartword(MI, BB, 1);

1458 case Mips::ATOMIC_LOAD_UMIN_I16:

1459 return emitAtomicBinaryPartword(MI, BB, 2);

1460 case Mips::ATOMIC_LOAD_UMIN_I32:

1461 return emitAtomicBinary(MI, BB);

1462 case Mips::ATOMIC_LOAD_UMIN_I64:

1463 return emitAtomicBinary(MI, BB);

1464

1465 case Mips::ATOMIC_LOAD_UMAX_I8:

1466 return emitAtomicBinaryPartword(MI, BB, 1);

1467 case Mips::ATOMIC_LOAD_UMAX_I16:

1468 return emitAtomicBinaryPartword(MI, BB, 2);

1469 case Mips::ATOMIC_LOAD_UMAX_I32:

1470 return emitAtomicBinary(MI, BB);

1471 case Mips::ATOMIC_LOAD_UMAX_I64:

1472 return emitAtomicBinary(MI, BB);

1473

1474 case Mips::PseudoSDIV:

1475 case Mips::PseudoUDIV:

1476 case Mips::DIV:

1477 case Mips::DIVU:

1478 case Mips::MOD:

1479 case Mips::MODU:

1481 false);

1482 case Mips::SDIV_MM_Pseudo:

1483 case Mips::UDIV_MM_Pseudo:

1484 case Mips::SDIV_MM:

1485 case Mips::UDIV_MM:

1486 case Mips::DIV_MMR6:

1487 case Mips::DIVU_MMR6:

1488 case Mips::MOD_MMR6:

1489 case Mips::MODU_MMR6:

1491 case Mips::PseudoDSDIV:

1492 case Mips::PseudoDUDIV:

1493 case Mips::DDIV:

1494 case Mips::DDIVU:

1495 case Mips::DMOD:

1496 case Mips::DMODU:

1498

1499 case Mips::PseudoSELECT_I:

1500 case Mips::PseudoSELECT_I64:

1501 case Mips::PseudoSELECT_S:

1502 case Mips::PseudoSELECT_D32:

1503 case Mips::PseudoSELECT_D64:

1504 return emitPseudoSELECT(MI, BB, false, Mips::BNE);

1505 case Mips::PseudoSELECTFP_F_I:

1506 case Mips::PseudoSELECTFP_F_I64:

1507 case Mips::PseudoSELECTFP_F_S:

1508 case Mips::PseudoSELECTFP_F_D32:

1509 case Mips::PseudoSELECTFP_F_D64:

1510 return emitPseudoSELECT(MI, BB, true, Mips::BC1F);

1511 case Mips::PseudoSELECTFP_T_I:

1512 case Mips::PseudoSELECTFP_T_I64:

1513 case Mips::PseudoSELECTFP_T_S:

1514 case Mips::PseudoSELECTFP_T_D32:

1515 case Mips::PseudoSELECTFP_T_D64:

1516 return emitPseudoSELECT(MI, BB, true, Mips::BC1T);

1517 case Mips::PseudoD_SELECT_I:

1518 case Mips::PseudoD_SELECT_I64:

1519 return emitPseudoD_SELECT(MI, BB);

1520 case Mips::LDR_W:

1521 return emitLDR_W(MI, BB);

1522 case Mips::LDR_D:

1523 return emitLDR_D(MI, BB);

1524 case Mips::STR_W:

1525 return emitSTR_W(MI, BB);

1526 case Mips::STR_D:

1527 return emitSTR_D(MI, BB);

1528 }

1529}

1530

1531

1532
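// emitAtomicBinary: maps each word/doubleword atomic read-modify-write
// pseudo onto its *_POSTRA counterpart, which is expanded into an ll/sc loop
// after register allocation; the min/max/umin/umax forms set
// NeedsAdditionalReg so an extra scratch virtual register is attached.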

1534MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,

1536

1541

1542 unsigned AtomicOp;

1543 bool NeedsAdditionalReg = false;

1544 switch (MI.getOpcode()) {

1545 case Mips::ATOMIC_LOAD_ADD_I32:

1546 AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;

1547 break;

1548 case Mips::ATOMIC_LOAD_SUB_I32:

1549 AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;

1550 break;

1551 case Mips::ATOMIC_LOAD_AND_I32:

1552 AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;

1553 break;

1554 case Mips::ATOMIC_LOAD_OR_I32:

1555 AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;

1556 break;

1557 case Mips::ATOMIC_LOAD_XOR_I32:

1558 AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;

1559 break;

1560 case Mips::ATOMIC_LOAD_NAND_I32:

1561 AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;

1562 break;

1563 case Mips::ATOMIC_SWAP_I32:

1564 AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;

1565 break;

1566 case Mips::ATOMIC_LOAD_ADD_I64:

1567 AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;

1568 break;

1569 case Mips::ATOMIC_LOAD_SUB_I64:

1570 AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;

1571 break;

1572 case Mips::ATOMIC_LOAD_AND_I64:

1573 AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;

1574 break;

1575 case Mips::ATOMIC_LOAD_OR_I64:

1576 AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;

1577 break;

1578 case Mips::ATOMIC_LOAD_XOR_I64:

1579 AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;

1580 break;

1581 case Mips::ATOMIC_LOAD_NAND_I64:

1582 AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;

1583 break;

1584 case Mips::ATOMIC_SWAP_I64:

1585 AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;

1586 break;

1587 case Mips::ATOMIC_LOAD_MIN_I32:

1588 AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;

1589 NeedsAdditionalReg = true;

1590 break;

1591 case Mips::ATOMIC_LOAD_MAX_I32:

1592 AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;

1593 NeedsAdditionalReg = true;

1594 break;

1595 case Mips::ATOMIC_LOAD_UMIN_I32:

1596 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;

1597 NeedsAdditionalReg = true;

1598 break;

1599 case Mips::ATOMIC_LOAD_UMAX_I32:

1600 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;

1601 NeedsAdditionalReg = true;

1602 break;

1603 case Mips::ATOMIC_LOAD_MIN_I64:

1604 AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;

1605 NeedsAdditionalReg = true;

1606 break;

1607 case Mips::ATOMIC_LOAD_MAX_I64:

1608 AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;

1609 NeedsAdditionalReg = true;

1610 break;

1611 case Mips::ATOMIC_LOAD_UMIN_I64:

1612 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;

1613 NeedsAdditionalReg = true;

1614 break;

1615 case Mips::ATOMIC_LOAD_UMAX_I64:

1616 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;

1617 NeedsAdditionalReg = true;

1618 break;

1619 default:

1621 }

1622

1623 Register OldVal = MI.getOperand(0).getReg();

1625 Register Incr = MI.getOperand(2).getReg();

1627

1629

1630

1631

1632

1633

1634

1635

1636

1637

1638

1639

1640

1641

1642

1643

1644

1645

1646

1647

1648

1649

1650

1651

1652

1653

1654

1655

1656

1657

1658

1659

1660

1661

1662

1663

1664

1667

1670

1678 if (NeedsAdditionalReg) {

1680 RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));

1683 }

1684

1685 MI.eraseFromParent();

1686

1687 return BB;

1688}

1689

1692 unsigned SrcReg) const {

1695

1698 return BB;

1699 }

1700

1703 return BB;

1704 }

1705

1710

1712 int64_t ShiftImm = 32 - (Size * 8);

1713

1716

1717 return BB;

1718}

1719
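// emitAtomicBinaryPartword: 8- and 16-bit atomics operate on the containing
// aligned word. The pointer is masked to a word boundary (AlignedAddr), the
// low address bits become a shift amount, a 0xff/0xffff mask is positioned
// over the addressed byte or halfword, and the matching *_POSTRA pseudo then
// performs the update on the masked field.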

1723 "Unsupported size for EmitAtomicBinaryPartial.");

1724

1733

1734 Register Dest = MI.getOperand(0).getReg();

1736 Register Incr = MI.getOperand(2).getReg();

1737

1738 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);

1749

1750 unsigned AtomicOp = 0;

1751 bool NeedsAdditionalReg = false;

1752 switch (MI.getOpcode()) {

1753 case Mips::ATOMIC_LOAD_NAND_I8:

1754 AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;

1755 break;

1756 case Mips::ATOMIC_LOAD_NAND_I16:

1757 AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;

1758 break;

1759 case Mips::ATOMIC_SWAP_I8:

1760 AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;

1761 break;

1762 case Mips::ATOMIC_SWAP_I16:

1763 AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;

1764 break;

1765 case Mips::ATOMIC_LOAD_ADD_I8:

1766 AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;

1767 break;

1768 case Mips::ATOMIC_LOAD_ADD_I16:

1769 AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;

1770 break;

1771 case Mips::ATOMIC_LOAD_SUB_I8:

1772 AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;

1773 break;

1774 case Mips::ATOMIC_LOAD_SUB_I16:

1775 AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;

1776 break;

1777 case Mips::ATOMIC_LOAD_AND_I8:

1778 AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;

1779 break;

1780 case Mips::ATOMIC_LOAD_AND_I16:

1781 AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;

1782 break;

1783 case Mips::ATOMIC_LOAD_OR_I8:

1784 AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;

1785 break;

1786 case Mips::ATOMIC_LOAD_OR_I16:

1787 AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;

1788 break;

1789 case Mips::ATOMIC_LOAD_XOR_I8:

1790 AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;

1791 break;

1792 case Mips::ATOMIC_LOAD_XOR_I16:

1793 AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;

1794 break;

1795 case Mips::ATOMIC_LOAD_MIN_I8:

1796 AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;

1797 NeedsAdditionalReg = true;

1798 break;

1799 case Mips::ATOMIC_LOAD_MIN_I16:

1800 AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;

1801 NeedsAdditionalReg = true;

1802 break;

1803 case Mips::ATOMIC_LOAD_MAX_I8:

1804 AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;

1805 NeedsAdditionalReg = true;

1806 break;

1807 case Mips::ATOMIC_LOAD_MAX_I16:

1808 AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;

1809 NeedsAdditionalReg = true;

1810 break;

1811 case Mips::ATOMIC_LOAD_UMIN_I8:

1812 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;

1813 NeedsAdditionalReg = true;

1814 break;

1815 case Mips::ATOMIC_LOAD_UMIN_I16:

1816 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;

1817 NeedsAdditionalReg = true;

1818 break;

1819 case Mips::ATOMIC_LOAD_UMAX_I8:

1820 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;

1821 NeedsAdditionalReg = true;

1822 break;

1823 case Mips::ATOMIC_LOAD_UMAX_I16:

1824 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;

1825 NeedsAdditionalReg = true;

1826 break;

1827 default:

1828 llvm_unreachable("Unknown subword atomic pseudo for expansion!");

1829 }

1830

1831

1835 MF->insert(It, exitMBB);

1836

1837

1841

1843

1844

1845

1846

1847

1848

1849

1850

1851

1852

1853

1854 int64_t MaskImm = (Size == 1) ? 255 : 65535;

1859 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)

1860 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);

1863 } else {

1868 }

1869 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)

1875

1876

1877

1878

1879

1880

1884 .addReg(AlignedAddr)

1895 if (NeedsAdditionalReg) {

1899 }

1900

1901 MI.eraseFromParent();

1902

1903 return exitMBB;

1904}

1905

1906

1907

1908

1909

1910
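// emitAtomicCmpSwap: the 32/64-bit compare-and-swap pseudos are rewritten to
// ATOMIC_CMP_SWAP_I32/I64_POSTRA. The pointer, expected value and new value
// are first copied into fresh virtual registers so the post-RA expansion can
// clobber its operands, and a scratch register is added for the result of
// the store-conditional.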

1912MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,

1914

1915 assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||

1916 MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&

1917 "Unsupported atomic pseudo for EmitAtomicCmpSwap.");

1918

1919 const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;

1920

1926

1927 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32

1928 ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA

1929 : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;

1930 Register Dest = MI.getOperand(0).getReg();

1932 Register OldVal = MI.getOperand(2).getReg();

1933 Register NewVal = MI.getOperand(3).getReg();

1934

1935 Register Scratch = MRI.createVirtualRegister(RC);

1937

1938

1939

1940

1941

1942

1943 Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));

1944 Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));

1945 Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));

1946

1950

1951

1952

1953

1954

1962

1963 MI.eraseFromParent();

1964

1965 return BB;

1966}

1967

1968MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(

1971 "Unsupported size for EmitAtomicCmpSwapPartial.");

1972

1981

1982 Register Dest = MI.getOperand(0).getReg();

1984 Register CmpVal = MI.getOperand(2).getReg();

1985 Register NewVal = MI.getOperand(3).getReg();

1986

1987 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);

1991 Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);

1992 Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);

1996 Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);

1997 Register MaskedNewVal = RegInfo.createVirtualRegister(RC);

1998 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8

1999 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA

2000 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;

2001

2002

2003

2004

2005

2006

2007

2008

2009

2010

2011

2014

2015

2019 MF->insert(It, exitMBB);

2020

2021

2025

2027

2028

2029

2030

2031

2032

2033

2034

2035

2036

2037

2038

2039

2040

2041 int64_t MaskImm = (Size == 1) ? 255 : 65535;

2042 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)

2044 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)

2046 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)

2047 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);

2050 } else {

2055 }

2056 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)

2061 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)

2063 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)

2065 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)

2067 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)

2069

2070

2071

2072

2073

2076 .addReg(AlignedAddr)

2078 .addReg(ShiftedCmpVal)

2080 .addReg(ShiftedNewVal)

2086

2087 MI.eraseFromParent();

2088

2089 return exitMBB;

2090}

2091

2093

2094

2095 SDValue Chain = Op.getOperand(0);

2096 SDValue Dest = Op.getOperand(2);

2098

2101

2102

2104 return Op;

2105

2112 FCC0, Dest, CondRes);

2113}

2114

2115SDValue MipsTargetLowering::

2117{

2120

2121

2123 return Op;

2124

2127}

2128

2132

2134 "Floating point operand expected.");

2135

2139

2141}

2142

2145 EVT Ty = Op.getValueType();

2148

2151 "Windows is the only supported COFF target");

2155 }

2156

2162 if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))

2163

2165

2166

2168

2170 }

2171

2172

2173

2174

2175

2176

2177

2178

2179

2180

2181

2182

2185

2189 DAG.getEntryNode(),

2191

2196}

2197

2201 EVT Ty = Op.getValueType();

2202

2206

2208}

2209

2210SDValue MipsTargetLowering::

2212{

2213

2214

2215

2216

2220

2224

2226

2228

2231

2237

2239

2241 ArgListEntry Entry;

2243 Entry.Ty = PtrTy;

2244 Args.push_back(Entry);

2245

2247 CLI.setDebugLoc(DL)

2249 .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));

2250 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

2251

2253

2255 return Ret;

2256

2265 }

2266

2269

2273 TGA);

2276 } else {

2277

2286 }

2287

2290}

2291

2292SDValue MipsTargetLowering::

2294{

2296 EVT Ty = Op.getValueType();

2297

2301

2303}

2304

2305SDValue MipsTargetLowering::

2307{

2309 EVT Ty = Op.getValueType();

2310

2315

2318

2320

2323 }

2324

2326}

2327

2331

2335

2336

2337

2338 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

2339 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),

2341}

2342

2345 EVT VT = Node->getValueType(0);

2347 SDValue VAListPtr = Node->getOperand(1);

2350 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();

2352 unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;

2353

2356 SDValue VAList = VAListLoad;

2357

2358

2359

2360

2361

2362

2363

2364

2369

2373 }

2374

2375

2377 unsigned ArgSizeInBytes =

2383

2386

2387

2388

2389

2390

2391

2392 if (Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {

2393 unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;

2394 VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,

2396 }

2397

2399}

2400

2402 bool HasExtractInsert) {

2403 EVT TyX = Op.getOperand(0).getValueType();

2404 EVT TyY = Op.getOperand(1).getValueType();

2409

2410

2411

2412 SDValue X = (TyX == MVT::f32) ?

2415 Const1);

2416 SDValue Y = (TyY == MVT::f32) ?

2419 Const1);

2420

2421 if (HasExtractInsert) {

2422

2423

2426 } else {

2427

2428

2429

2430

2431

2437 }

2438

2439 if (TyX == MVT::f32)

2441

2443 Op.getOperand(0),

2446}

2447

2449 bool HasExtractInsert) {

2450 unsigned WidthX = Op.getOperand(0).getValueSizeInBits();

2451 unsigned WidthY = Op.getOperand(1).getValueSizeInBits();

2455

2456

2459

2460 if (HasExtractInsert) {

2461

2462

2464 DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);

2465

2466 if (WidthX > WidthY)

2468 else if (WidthY > WidthX)

2470

2472 DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,

2473 X);

2475 }

2476

2477

2478

2479

2480

2481

2486

2487 if (WidthX > WidthY)

2489 else if (WidthY > WidthX)

2491

2496}

2497

2502

2504}

2505

2507 bool HasExtractInsert) const {

2510

2513

2514

2515

2516 SDValue X = (Op.getValueType() == MVT::f32)

2519 Op.getOperand(0), Const1);

2520

2521

2522 if (HasExtractInsert)

2526 else {

2527

2528

2531 }

2532

2533 if (Op.getValueType() == MVT::f32)

2535

2536

2537

2538

2539

2544}

2545

2547 bool HasExtractInsert) const {

2550

2553

2554

2556

2557

2558 if (HasExtractInsert)

2560 DAG.getRegister(Mips::ZERO_64, MVT::i64),

2562 else {

2565 }

2566

2568}

2569

2571 if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))

2573

2575}

2576

2580 EVT VT = Op.getValueType();

2581 SDValue Operand = Op.getOperand(0);

2583

2585 return Operand;

2586

2589}

2590

2591SDValue MipsTargetLowering::

2593

2594 if (Op.getConstantOperandVal(0) != 0) {

2596 "return address can be determined only for current frame");

2598 }

2599

2602 EVT VT = Op.getValueType();

2606 return FrameAddr;

2607}

2608

2613

2614

2615 if (Op.getConstantOperandVal(0) != 0) {

2617 "return address can be determined only for current frame");

2619 }

2620

2623 MVT VT = Op.getSimpleValueType();

2624 unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;

2626

2627

2630}

2631

2632

2633

2634

2635

2637 const {

2640

2642 SDValue Chain = Op.getOperand(0);

2644 SDValue Handler = Op.getOperand(2);

2646 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;

2647

2648

2649

2650 unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;

2651 unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;

2658}

2659

2662

2663

2664 unsigned SType = 0;

2668}

2669

2674

2676 SDValue Shamt = Op.getOperand(2);

2677

2678

2679

2680

2681

2682

2697

2700}

2701

2703 bool IsSRA) const {

2706 SDValue Shamt = Op.getOperand(2);

2708

2709

2710

2711

2712

2713

2714

2715

2716

2717

2718

2719

2720

2721

2731 DL, VT, Hi, Shamt);

2736

2741 DL, VTList, Cond, ShiftRightHi,

2743 ShiftRightHi);

2744 }

2745

2748 IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);

2749

2752}

2753

2757 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();

2758 EVT BasePtrVT = Ptr.getValueType();

2761

2765

2766 SDValue Ops[] = { Chain, Ptr, Src };

2768 LD->getMemOperand());

2769}

2770

2771

2774 EVT MemVT = LD->getMemoryVT();

2775

2777 return Op;

2778

2779

2780 if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||

2781 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))

2783

2785 EVT VT = Op.getValueType();

2787 SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);

2788

2789 assert((VT == MVT::i32) || (VT == MVT::i64));

2790

2791

2792

2793

2794

2795

2798 IsLittle ? 7 : 0);

2800 IsLittle ? 0 : 7);

2801 }

2802

2804 IsLittle ? 3 : 0);

2806 IsLittle ? 0 : 3);

2807

2808

2809

2810

2811

2812

2813

2814

2815 if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||

2817 return LWR;

2818

2820

2821

2822

2823

2824

2825

2826

2827

2832 SDValue Ops[] = { SRL, LWR.getValue(1) };

2834}

2835

2842

2846

2850}

2851

2852

2854 bool IsLittle) {

2856 EVT VT = Value.getValueType();

2857

2858

2859

2860

2861

2862

2863

2866 IsLittle ? 3 : 0);

2868 }

2869

2870 assert(VT == MVT::i64);

2871

2872

2873

2874

2875

2876

2879}

2880

2881

2883 bool SingleFloat) {

2885

2889

2896}

2897

2901

2902

2905 ((MemVT == MVT::i32) || (MemVT == MVT::i64)))

2907

2909}

2910

2913

2914

2915

2917 EVT ValTy = Op->getValueType(0);

2920}

2921

2926

2929 Op.getOperand(0));

2931}

2932

2933

2934

2935

2936

2937

2938

2939

2940

2941

2942

2943

2944

2945

2946

2947

2948

2949

2950

2951

2952

2953

2954

2955

2956
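// CC_MipsO32: the O32 argument convention. The first four argument words go
// in A0-A3, single-precision floats use F12/F14 and doubles use the D6/D7 or
// D12_64/D14_64 pairs supplied by the FP32/FP64 wrappers below, and floats
// are passed in the integer registers instead in the cases computed into
// AllocateFloatsInIntReg (varargs calls among them).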

2962

2963 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };

2964

2966

2967 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };

2968

2969 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };

2970

2971

2973 return true;

2974

2975

2977 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {

2978 LocVT = MVT::i32;

2979 if (ArgFlags.isSExt())

2981 else if (ArgFlags.isZExt())

2983 else

2985 }

2986 }

2987

2988

2989 if (LocVT == MVT::i8 || LocVT == MVT::i16) {

2990 LocVT = MVT::i32;

2991 if (ArgFlags.isSExt())

2993 else if (ArgFlags.isZExt())

2995 else

2997 }

2998

2999 unsigned Reg;

3000

3001

3002

3003

3004 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||

3007 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));

3009

3010

3011 if (ValVT == MVT::i32 && isVectorFloat) {

3012

3013

3014

3015

3016 if (ArgFlags.isSplit()) {

3017 Reg = State.AllocateReg(FloatVectorIntRegs);

3018 if (Reg == Mips::A2)

3020 else if (Reg == 0)

3022 } else {

3023

3024

3026 }

3027 } else if (ValVT == MVT::i32 ||

3028 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {

3030

3031

3032 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))

3034 LocVT = MVT::i32;

3035 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {

3036

3037

3039 if (Reg == Mips::A1 || Reg == Mips::A3)

3041

3042 if (Reg) {

3043 LocVT = MVT::i32;

3044

3051 return false;

3052 }

3053 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {

3054

3055 if (ValVT == MVT::f32) {

3057

3059 } else {

3061

3063 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)

3066 }

3067 } else

3069

3070 if (!Reg) {

3073 } else

3075

3076 return false;

3077}

3078

3082 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };

3083

3084 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);

3085}

3086

3090 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };

3091

3092 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);

3093}

3094

3098

3099#include "MipsGenCallingConv.inc"

3100

3102 return CC_Mips_FixedArg;

3103 }

3104

3106 return RetCC_Mips;

3107 }

3108

3109

3110

3111

3112SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,

3114 const SDLoc &DL, bool IsTailCall,

3116 if (!IsTailCall) {

3121 }

3122

3128}

3129
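// getOpndList: assembles the operand list for the call node. PIC calls that
// go through the GOT get an extra copy of $gp (GP/GP_64), every argument
// register is copied into place while threading the glue value, the
// registers are then listed as implicit uses, and the calling convention's
// preserved-register mask is appended (with a special mask for the Mips16
// "__Mips16RetHelper" helpers).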

3132 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,

3133 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,

3136

3137

3138

3139

3140

3141

3142

3143

3144

3145

3146 if (IsPICCall && !InternalLinkage && IsCallReloc) {

3147 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;

3148 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;

3149 RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));

3150 }

3151

3152

3153

3154

3155

3157

3158 for (auto &R : RegsToPass) {

3159 Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InGlue);

3161 }

3162

3163

3164

3165 for (auto &R : RegsToPass)

3167

3168

3172 assert(Mask && "Missing call preserved mask for calling convention");

3176 Function *F = G->getGlobal()->getParent()->getFunction(Sym);

3177 if (F && F->hasFnAttribute("__Mips16RetHelper")) {

3179 }

3180 }

3181 }

3183

3186}

3187
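// After instruction selection, calls and register tail calls (JALR and the
// TAILCALLREG variants) are annotated with an R_MIPS_JALR relocation naming
// the callee, but only when the target is a function symbol; data symbols
// are skipped so the linker does not relax the call into a branch to data.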

3189 SDNode *Node) const {

3190 switch (MI.getOpcode()) {

3191 default:

3192 return;

3193 case Mips::JALR:

3194 case Mips::JALRPseudo:

3195 case Mips::JALR64:

3196 case Mips::JALR64Pseudo:

3197 case Mips::JALR16_MM:

3198 case Mips::JALRC16_MMR6:

3199 case Mips::TAILCALLREG:

3200 case Mips::TAILCALLREG64:

3201 case Mips::TAILCALLR6REG:

3202 case Mips::TAILCALL64R6REG:

3203 case Mips::TAILCALLREG_MM:

3204 case Mips::TAILCALLREG_MMR6: {

3208 Node->getNumOperands() < 1 ||

3209 Node->getOperand(0).getNumOperands() < 2) {

3210 return;

3211 }

3212

3213

3214

3215 const SDValue TargetAddr = Node->getOperand(0).getOperand(1);

3218 dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {

3219

3220

3221

3222 if (!isa<Function>(G->getGlobal())) {

3223 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "

3224 << G->getGlobal()->getName() << "\n");

3225 return;

3226 }

3227 Sym = G->getGlobal()->getName();

3228 }

3230 dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {

3231 Sym = ES->getSymbol();

3232 }

3233

3234 if (Sym.empty())

3235 return;

3236

3239 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");

3241 }

3242 }

3243}

3244

3245

3246
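// LowerCall: analyzes the outgoing arguments against CC_Mips, checks whether
// the call qualifies for tail-call optimization, lowers byval arguments and
// the O32 f64-in-two-GPRs split, places register arguments into RegsToPass
// and stack arguments into MemOpChains, materializes the callee address
// according to the code model (PIC GOT loads, long-call, dllimport on
// Windows COFF), and finally emits the call sequence and collects the
// results through LowerCallResult.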

3259 bool IsVarArg = CLI.IsVarArg;

3260

3266

3267

3272

3274 dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());

3275

3276

3277

3278

3279

3280

3281

3282

3283

3284

3285

3286

3287

3288

3289

3290

3291

3292

3293

3294

3295

3296

3297

3298

3299

3300 bool MemcpyInByVal = ES && StringRef(ES->getSymbol()) == "memcpy" &&

3303

3304

3305

3306 unsigned ReservedArgArea =

3308 CCInfo.AllocateStack(ReservedArgArea, Align(1));

3309

3310 CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),

3311 ES ? ES->getSymbol() : nullptr);

3312

3313

3314 unsigned StackSize = CCInfo.getStackSize();

3315

3316

3318

3319

3320

3321 bool InternalLinkage = false;

3322 if (IsTailCall) {

3323 IsTailCall = isEligibleForTailCallOptimization(

3326 InternalLinkage = G->getGlobal()->hasInternalLinkage();

3327 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||

3328 G->getGlobal()->hasPrivateLinkage() ||

3329 G->getGlobal()->hasHiddenVisibility() ||

3330 G->getGlobal()->hasProtectedVisibility());

3331 }

3332 }

3334 report_fatal_error("failed to perform tail call elimination on a call "

3335 "site marked musttail");

3336

3337 if (IsTailCall)

3338 ++NumTailCalls;

3339

3340

3341

3342

3344 StackSize = alignTo(StackSize, StackAlignment);

3345

3346 if (!(IsTailCall || MemcpyInByVal))

3348

3352

3353 std::deque<std::pair<unsigned, SDValue>> RegsToPass;

3355

3356 CCInfo.rewindByValRegsInfo();

3357

3358

3359 for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {

3360 SDValue Arg = OutVals[OutIdx];

3364 bool UseUpperBits = false;

3365

3366

3367 if (Flags.isByVal()) {

3368 unsigned FirstByValReg, LastByValReg;

3369 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();

3370 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);

3371

3373 "ByVal args of size 0 should have been ignored by front-end.");

3374 assert(ByValIdx < CCInfo.getInRegsParamsCount());

3375 assert(!IsTailCall &&

3376 "Do not tail-call optimize if there is a byval argument.");

3377 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,

3379 VA);

3380 CCInfo.nextInRegsParam();

3381 continue;

3382 }

3383

3384

3386 default:

3390 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||

3391 (ValVT == MVT::f64 && LocVT == MVT::i64) ||

3392 (ValVT == MVT::i64 && LocVT == MVT::f64))

3394 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {

3401

3403

3405 Register LocRegHigh = ArgLocs[++i].getLocReg();

3406 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));

3407 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));

3408 continue;

3409 }

3410 }

3411 break;

3414 break;

3416 UseUpperBits = true;

3417 [[fallthrough]];

3420 break;

3422 UseUpperBits = true;

3423 [[fallthrough]];

3426 break;

3428 UseUpperBits = true;

3429 [[fallthrough]];

3432 break;

3433 }

3434

3435 if (UseUpperBits) {

3436 unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();

3441 }

3442

3443

3444

3446 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));

3447

3448

3449

3451 continue;

3452

3453

3455 if (Options.EmitCallSiteInfo)

3457

3458 continue;

3459 }

3460

3461

3463

3464

3465

3467 Chain, Arg, DL, IsTailCall, DAG));

3468 }

3469

3470

3471

3472 if (!MemOpChains.empty())

3474

3475

3476

3477

3478

3480 bool GlobalOrExternal = false, IsCallReloc = false;

3481

3482

3483

3484

3486

3487

3488

3489 if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {

3494 } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {

3496

3497

3498 if (auto *F = dyn_cast<Function>(N->getGlobal())) {

3499 if (F->hasFnAttribute("long-call"))

3500 UseLongCalls = true;

3501 else if (F->hasFnAttribute("short-call"))

3502 UseLongCalls = false;

3503 }

3504 if (UseLongCalls)

3508 }

3509 }

3510

3513 G->getGlobal()->hasDLLImportStorageClass()) {

3515 "Windows is the only supported COFF target");

3519 } else if (IsPIC) {

3522

3523 if (InternalLinkage)

3529 IsCallReloc = true;

3530 } else {

3533 IsCallReloc = true;

3534 }

3535 } else

3539 GlobalOrExternal = true;

3540 }

3541 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {

3542 const char *Sym = S->getSymbol();

3543

3544 if (!IsPIC)

3551 IsCallReloc = true;

3552 } else {

3555 IsCallReloc = true;

3556 }

3557

3558 GlobalOrExternal = true;

3559 }

3560

3563

3564 getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,

3565 IsCallReloc, CLI, Callee, Chain);

3566

3567 if (IsTailCall) {

3571 return Ret;

3572 }

3573

3576

3578

3579

3580

3581 if (!(MemcpyInByVal)) {

3582 Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);

3584 }

3585

3586

3587

3588 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,

3589 InVals, CLI);

3590}

3591

3592

3593

3594SDValue MipsTargetLowering::LowerCallResult(

3599

3603

3605 dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());

3606 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,

3607 ES ? ES->getSymbol() : nullptr);

3608

3609

3610 for (unsigned i = 0; i != RVLocs.size(); ++i) {

3612 assert(VA.isRegLoc() && "Can only return in registers!");

3613

3615 RVLocs[i].getLocVT(), InGlue);

3618

3620 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();

3622 unsigned Shift =

3627 }

3628

3630 default:

3633 break;

3636 break;

3640 break;

3646 break;

3652 break;

3653 }

3654

3656 }

3657

3658 return Chain;

3659}

3660

3666

3667

3669 default:

3670 break;

3676 unsigned Opcode =

3681 break;

3682 }

3683 }

3684

3685

3686

3687

3688

3690 default:

3693 break;

3697 break;

3702 break;

3707 break;

3710 break;

3711 }

3712

3713 return Val;

3714}

3715

3716

3717

3718

3719

3720

3721SDValue MipsTargetLowering::LowerFormalArguments(

3728

3730

3731

3732 std::vector<SDValue> OutChains;

3733

3734

3741

3742 if (Func.hasFnAttribute("interrupt") && Func.arg_empty())

3744 "Functions with the interrupt attribute cannot have arguments!");

3745

3746 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);

3748 CCInfo.getInRegsParamsCount() > 0);

3749

3750 unsigned CurArgIdx = 0;

3751 CCInfo.rewindByValRegsInfo();

3752

3753 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {

3755 if (Ins[InsIdx].isOrigArg()) {

3756 std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);

3757 CurArgIdx = Ins[InsIdx].getOrigArgIndex();

3758 }

3761 bool IsRegLoc = VA.isRegLoc();

3762

3763 if (Flags.isByVal()) {

3764 assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");

3765 unsigned FirstByValReg, LastByValReg;

3766 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();

3767 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);

3768

3770 "ByVal args of size 0 should have been ignored by front-end.");

3771 assert(ByValIdx < CCInfo.getInRegsParamsCount());

3772 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,

3773 FirstByValReg, LastByValReg, VA, CCInfo);

3774 CCInfo.nextInRegsParam();

3775 continue;

3776 }

3777

3778

3779 if (IsRegLoc) {

3783

3784

3785

3788

3789 ArgValue =

3791

3792

3793

3794 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||

3795 (RegVT == MVT::i64 && ValVT == MVT::f64) ||

3796 (RegVT == MVT::f64 && ValVT == MVT::i64))

3798 else if (ABI.IsO32() && RegVT == MVT::i32 &&

3799 ValVT == MVT::f64) {

3800 assert(VA.needsCustom() && "Expected custom argument for f64 split");

3802 unsigned Reg2 =

3808 ArgValue, ArgValue2);

3809 }

3810

3812 } else {

3814

3815 assert(!VA.needsCustom() && "unexpected custom memory argument");

3816

3817

3819

3820

3823

3824

3827 LocVT, DL, Chain, FIN,

3829 OutChains.push_back(ArgValue.getValue(1));

3830

3831 ArgValue =

3833

3835 }

3836 }

3837

3838 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {

3839

3840 if (ArgLocs[i].needsCustom()) {

3841 ++i;

3842 continue;

3843 }

3844

3845

3846

3847

3848 if (Ins[InsIdx].Flags.isSRet()) {

3850 if (!Reg) {

3854 }

3857 break;

3858 }

3859 }

3860

3861 if (IsVarArg)

3862 writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);

3863

3864

3865

3866 if (!OutChains.empty()) {

3867 OutChains.push_back(Chain);

3869 }

3870

3871 return Chain;

3872}

3873

3874

3875

3876

3877

3878bool

3879MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,

3884 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);

3885 return CCInfo.CheckCallReturn(Outs, RetCC_Mips, RetTy);

3886}

3887

3888bool MipsTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,

3889 bool IsSigned) const {

3891 return true;

3892

3893 return IsSigned;

3894}

3895

3902

3904

3906}

3907

3910 bool IsVarArg,

3914

3915

3918

3919

3921

3922

3923 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);

3924

3927

3928

3929 for (unsigned i = 0; i != RVLocs.size(); ++i) {

3930 SDValue Val = OutVals[i];

3932 assert(VA.isRegLoc() && "Can only return in registers!");

3933 bool UseUpperBits = false;

3934

3936 default:

3939 break;

3942 break;

3944 UseUpperBits = true;

3945 [[fallthrough]];

3948 break;

3950 UseUpperBits = true;

3951 [[fallthrough]];

3954 break;

3956 UseUpperBits = true;

3957 [[fallthrough]];

3960 break;

3961 }

3962

3963 if (UseUpperBits) {

3964 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();

3969 }

3970

3972

3973

3976 }

3977

3978

3979

3980

3981

3985

3986 if (!Reg)

3987 llvm_unreachable("sret virtual register not created in the entry block");

3990 unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;

3991

3995 }

3996

3997 RetOps[0] = Chain;

3998

3999

4002

4003

4005 return LowerInterruptReturn(RetOps, DL, DAG);

4006

4007

4009}

4010

4011

4012

4013

4014

4015

4016

4018MipsTargetLowering::getConstraintType(StringRef Constraint) const {

4019

4020

4021

4022

4023

4024

4025

4026

4027

4028

4029

4030 if (Constraint.size() == 1) {

4031 switch (Constraint[0]) {

4032 default : break;

4033 case 'd':

4034 case 'y':

4035 case 'f':

4036 case 'c':

4037 case 'l':

4038 case 'x':

4040 case 'R':

4042 }

4043 }

4044

4045 if (Constraint == "ZC")

4047

4049}

4050

4051

4052

4053

4055MipsTargetLowering::getSingleConstraintMatchWeight(

4056 AsmOperandInfo &info, const char *constraint) const {

4058 Value *CallOperandVal = info.CallOperandVal;

4059

4060

4061 if (!CallOperandVal)

4064

4065 switch (*constraint) {

4066 default:

4068 break;

4069 case 'd':

4070 case 'y':

4073 break;

4074 case 'f':

4080 break;

4081 case 'c':

4082 case 'l':

4083 case 'x':

4086 break;

4087 case 'I':

4088 case 'J':

4089 case 'K':

4090 case 'L':

4091 case 'N':

4092 case 'O':

4093 case 'P':

4094 if (isa<ConstantInt>(CallOperandVal))

4096 break;

4097 case 'R':

4099 break;

4100 }

4101 return weight;

4102}

4103

4104

4105

4106

4107

4109 unsigned long long &Reg) {

4110 if (C.front() != '{' || C.back() != '}')

4111 return std::make_pair(false, false);

4112

4113

4115 I = std::find_if(B, E, isdigit);

4116

4118

4119

4120 if (I == E)

4121 return std::make_pair(true, false);

4122

4123

4125 true);

4126}

4127

4132 return VT.bitsLT(MinVT) ? MinVT : VT;

4133}

4134

4135std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::

4136parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {

4141 unsigned long long Reg;

4142

4144

4145 if (!R.first)

4146 return std::make_pair(0U, nullptr);

4147

4148 if ((Prefix == "hi" || Prefix == "lo")) {

4149

4150 if (R.second)

4151 return std::make_pair(0U, nullptr);

4152

4153 RC = TRI->getRegClass(Prefix == "hi" ?

4154 Mips::HI32RegClassID : Mips::LO32RegClassID);

4155 return std::make_pair(*(RC->begin()), RC);

4156 } else if (Prefix.starts_with("$msa")) {

4157

4158

4159

4160 if (R.second)

4161 return std::make_pair(0U, nullptr);

4162

4164 .Case("$msair", Mips::MSAIR)

4165 .Case("$msacsr", Mips::MSACSR)

4166 .Case("$msaaccess", Mips::MSAAccess)

4167 .Case("$msasave", Mips::MSASave)

4168 .Case("$msamodify", Mips::MSAModify)

4169 .Case("$msarequest", Mips::MSARequest)

4170 .Case("$msamap", Mips::MSAMap)

4171 .Case("$msaunmap", Mips::MSAUnmap)

4173

4174 if (!Reg)

4175 return std::make_pair(0U, nullptr);

4176

4177 RC = TRI->getRegClass(Mips::MSACtrlRegClassID);

4178 return std::make_pair(Reg, RC);

4179 }

4180

4181 if (R.second)

4182 return std::make_pair(0U, nullptr);

4183

4184 if (Prefix == "$f") {

4185

4186

4187 if (VT == MVT::Other)

4189

4191

4192 if (RC == &Mips::AFGR64RegClass) {

4193 assert(Reg % 2 == 0);

4194 Reg >>= 1;

4195 }

4196 } else if (Prefix == "$fcc")

4197 RC = TRI->getRegClass(Mips::FCCRegClassID);

4198 else if (Prefix == "$w") {

4199 RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);

4200 } else {

4201 assert(Prefix == "$");

4202 RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);

4203 }

4204

4205 assert(Reg < RC->getNumRegs());

4206 return std::make_pair(*(RC->begin() + Reg), RC);

4207}

4208

4209

4210

4211
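// getRegForInlineAsmConstraint: maps single-letter constraints to register
// classes. 'd', 'y' and 'r' select the general-purpose registers (CPU16 or
// GPR32/GPR64 depending on mode and type), 'f' selects an FPU or MSA class
// based on the value type, 'c' is pinned to $t9 (T9/T9_64), 'l' to the LO
// register, and anything longer, such as "{$f20}", is handed to
// parseRegForInlineAsmConstraint.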

4212std::pair<unsigned, const TargetRegisterClass *>

4215 MVT VT) const {

4216 if (Constraint.size() == 1) {

4217 switch (Constraint[0]) {

4218 case 'd':

4219 case 'y':

4220 case 'r':

4221 if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||

4222 VT == MVT::i1) ||

4225 return std::make_pair(0U, &Mips::CPU16RegsRegClass);

4226 return std::make_pair(0U, &Mips::GPR32RegClass);

4227 }

4230 return std::make_pair(0U, &Mips::GPR32RegClass);

4233 return std::make_pair(0U, &Mips::GPR64RegClass);

4234

4235 return std::make_pair(0U, nullptr);

4236 case 'f':

4237 if (VT == MVT::v16i8)

4238 return std::make_pair(0U, &Mips::MSA128BRegClass);

4239 else if (VT == MVT::v8i16 || VT == MVT::v8f16)

4240 return std::make_pair(0U, &Mips::MSA128HRegClass);

4241 else if (VT == MVT::v4i32 || VT == MVT::v4f32)

4242 return std::make_pair(0U, &Mips::MSA128WRegClass);

4243 else if (VT == MVT::v2i64 || VT == MVT::v2f64)

4244 return std::make_pair(0U, &Mips::MSA128DRegClass);

4245 else if (VT == MVT::f32)

4246 return std::make_pair(0U, &Mips::FGR32RegClass);

4249 return std::make_pair(0U, &Mips::FGR64RegClass);

4250 return std::make_pair(0U, &Mips::AFGR64RegClass);

4251 }

4252 break;

4253 case 'c':

4254 if (VT == MVT::i32)

4255 return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);

4256 if (VT == MVT::i64)

4257 return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);

4258

4259 return std::make_pair(0U, nullptr);

4260 case 'l':

4261

4262 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)

4263 return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);

4264 return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);

4265 case 'x':

4266

4267

4268

4269 return std::make_pair(0U, nullptr);

4270 }

4271 }

4272

4273 if (!Constraint.empty()) {

4274 std::pair<unsigned, const TargetRegisterClass *> R;

4275 R = parseRegForInlineAsmConstraint(Constraint, VT);

4276

4277 if (R.second)

4278 return R;

4279 }

4280

4282}

4283

4284

4285
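// LowerAsmOperandForConstraint: checks immediate operands against the MIPS
// inline-asm constraints before deferring to the generic handling:
// 'I' signed 16-bit, 'J' zero, 'K' unsigned 16-bit, 'L' a signed 32-bit
// value with the low 16 bits clear, 'N' -65535..-1, 'O' signed 15-bit and
// 'P' 1..65535.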

4286void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,

4288 std::vector<SDValue> &Ops,

4292

4293

4294 if (Constraint.size() > 1)

4295 return;

4296

4297 char ConstraintLetter = Constraint[0];

4298 switch (ConstraintLetter) {

4299 default: break;

4300 case 'I':

4301

4304 int64_t Val = C->getSExtValue();

4305 if (isInt<16>(Val)) {

4307 break;

4308 }

4309 }

4310 return;

4311 case 'J':

4314 int64_t Val = C->getZExtValue();

4315 if (Val == 0) {

4317 break;

4318 }

4319 }

4320 return;

4321 case 'K':

4325 if (isUInt<16>(Val)) {

4327 break;

4328 }

4329 }

4330 return;

4331 case 'L':

4334 int64_t Val = C->getSExtValue();

4335 if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){

4337 break;

4338 }

4339 }

4340 return;

4341 case 'N':

4344 int64_t Val = C->getSExtValue();

4345 if ((Val >= -65535) && (Val <= -1)) {

4347 break;

4348 }

4349 }

4350 return;

4351 case 'O':

4354 int64_t Val = C->getSExtValue();

4355 if ((isInt<15>(Val))) {

4357 break;

4358 }

4359 }

4360 return;

4361 case 'P':

4364 int64_t Val = C->getSExtValue();

4365 if ((Val <= 65535) && (Val >= 1)) {

4367 break;

4368 }

4369 }

4370 return;

4371 }

4372

4373 if (Result.getNode()) {

4374 Ops.push_back(Result);

4375 return;

4376 }

4377

4379}

4380

4381bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,

4383 unsigned AS,

4385

4386 if (AM.BaseGV)

4387 return false;

4388

4389 switch (AM.Scale) {

4390 case 0:

4391 break;

4392 case 1:

4393 if (!AM.HasBaseReg)

4394 break;

4395 return false;

4396 default:

4397 return false;

4398 }

4399

4400 return true;

4401}

4402

4403bool

4404MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {

4405

4406 return false;

4407}

4408

4409EVT MipsTargetLowering::getOptimalMemOpType(

4412 return MVT::i64;

4413

4414 return MVT::i32;

4415}

4416

4417bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,

4418 bool ForCodeSize) const {

4419 if (VT != MVT::f32 && VT != MVT::f64)

4420 return false;

4421 if (Imm.isNegZero())

4422 return false;

4423 return Imm.isZero();

4424}

4425

4426unsigned MipsTargetLowering::getJumpTableEncoding() const {

4427

4428

4431

4433}

4434

4435bool MipsTargetLowering::useSoftFloat() const {

4437}

4438

4439void MipsTargetLowering::copyByValRegs(

4440 SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,

4443 unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,

4448 unsigned NumRegs = LastReg - FirstReg;

4449 unsigned RegAreaSize = NumRegs * GPRSizeInBytes;

4450 unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);

4451 int FrameObjOffset;

4453

4454 if (RegAreaSize)

4455 FrameObjOffset =

4457 (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);

4458 else

4460

4461

4463

4464

4465

4466

4467

4468 int FI = MFI.CreateFixedObject(FrameObjSize, FrameObjOffset, false, true);

4471

4472 if (!NumRegs)

4473 return;

4474

4475

4478

4479 for (unsigned I = 0; I < NumRegs; ++I) {

4480 unsigned ArgReg = ByValArgRegs[FirstReg + I];

4481 unsigned VReg = addLiveIn(MF, ArgReg, RC);

4482 unsigned Offset = I * GPRSizeInBytes;

4487 OutChains.push_back(Store);

4488 }

4489}

4490

4491
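// passByValArg: copies a byval argument into the argument registers one
// register-sized word at a time; a trailing partial word is assembled with
// narrowing loads, shifts and ORs (honoring endianness), and whatever does
// not fit in registers is copied to the outgoing stack area with a memcpy
// node.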

4492void MipsTargetLowering::passByValArg(

4494 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,

4497 unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,

4499 unsigned ByValSizeInBytes = Flags.getByValSize();

4500 unsigned OffsetInBytes = 0;

4502 Align Alignment =

4503 std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));

4506 unsigned NumRegs = LastReg - FirstReg;

4507

4508 if (NumRegs) {

4510 bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);

4511 unsigned I = 0;

4512

4513

4514 for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {

4520 unsigned ArgReg = ArgRegs[FirstReg + I];

4521 RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));

4522 }

4523

4524

4525 if (ByValSizeInBytes == OffsetInBytes)

4526 return;

4527

4528

4529 if (LeftoverBytes) {

4531

4532 for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;

4533 OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {

4534 unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;

4535

4536 if (RemainingSizeInBytes < LoadSizeInBytes)

4537 continue;

4538

4539

4542 PtrTy));

4547

4548

4549 unsigned Shamt;

4550

4551 if (isLittle)

4552 Shamt = TotalBytesLoaded * 8;

4553 else

4554 Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;

4555

4558

4561 else

4562 Val = Shift;

4563

4564 OffsetInBytes += LoadSizeInBytes;

4565 TotalBytesLoaded += LoadSizeInBytes;

4566 Alignment = std::min(Alignment, Align(LoadSizeInBytes));

4567 }

4568

4569 unsigned ArgReg = ArgRegs[FirstReg + I];

4570 RegsToPass.push_back(std::make_pair(ArgReg, Val));

4571 return;

4572 }

4573 }

4574

4575

4576 unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;

4582 Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, DL, PtrTy),

4583 Align(Alignment), false, false,

4586}

4587

4588void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,

4600

4601

4602 int VaArgOffset;

4603

4604 if (ArgRegs.size() == Idx)

4606 else {

4607 VaArgOffset =

4609 (int)(RegSizeInBytes * (ArgRegs.size() - Idx));

4610 }

4611

4612

4613

4614 int FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);

4616

4617

4618

4619

4620

4621 for (unsigned I = Idx; I < ArgRegs.size();

4622 ++I, VaArgOffset += RegSizeInBytes) {

4629 cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(

4630 (Value *)nullptr);

4631 OutChains.push_back(Store);

4632 }

4633}
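// Standalone sketch (plain C++, not LLVM) of the varargs save-area layout in
// writeVarArgRegs above: every fixed argument register not consumed by named
// arguments is spilled to consecutive RegSizeInBytes slots so va_arg can walk
// them in memory. The base offset is assumed here to be the end of the
// callee-allocated register area (as in O32); the real code subtracts from the
// area size reported by the ABI object.
#include <cstdio>

int main() {
  const unsigned RegSizeInBytes = 4;
  const unsigned NumArgRegs = 4;   // e.g. O32: a0..a3
  const unsigned Idx = 1;          // index of the first unconsumed register

  const int CalleeAllocdArea = RegSizeInBytes * NumArgRegs;   // assumption
  int VaArgOffset =
      CalleeAllocdArea - (int)(RegSizeInBytes * (NumArgRegs - Idx));
  for (unsigned I = Idx; I < NumArgRegs; ++I, VaArgOffset += RegSizeInBytes)
    std::printf("spill arg reg %u to fixed object at offset %d\n", I,
                VaArgOffset);
  return 0;
}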

4634

4636 Align Alignment) const {

4638

4639 assert(Size && "Byval argument's size shouldn't be 0.");

4640

4641 Alignment = std::min(Alignment, TFL->getStackAlign());

4642

4643 unsigned FirstReg = 0;

4644 unsigned NumRegs = 0;

4645

4649

4652

4653

4654

4656 Alignment >= Align(RegSizeInBytes) &&

4657 "Byval argument's alignment should be a multiple of RegSizeInBytes.");

4658

4660

4661

4662

4663

4664

4665 if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {

4666 State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);

4667 ++FirstReg;

4668 }

4669

4670

4672 for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());

4673 Size -= RegSizeInBytes, ++I, ++NumRegs)

4674 State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);

4675 }

4676

4678}
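// Standalone sketch (plain C++, not CCState) of the byval register split in
// HandleByVal above: an over-aligned aggregate must start in an even register
// (so one register may be skipped), then registers are claimed until either
// the aggregate is covered or the argument registers run out; whatever is left
// is passed on the stack.
#include <algorithm>
#include <cstdio>

int main() {
  const int RegSizeInBytes = 4;
  const unsigned NumIntArgRegs = 4;  // e.g. O32: a0..a3
  int Size = 12;                     // byval size in bytes
  const int Alignment = 8;           // byval alignment in bytes
  unsigned FirstReg = 1;             // first still-unallocated arg register

  unsigned NumRegs = 0;
  if (Alignment > RegSizeInBytes && (FirstReg % 2)) {
    std::printf("skip reg %u so the argument starts on an even register\n",
                FirstReg);
    ++FirstReg;                      // mirrors the shadow AllocateReg call above
  }
  for (unsigned I = FirstReg; Size > 0 && I < NumIntArgRegs;
       Size -= RegSizeInBytes, ++I, ++NumRegs)
    std::printf("allocate arg reg %u to the byval argument\n", I);
  std::printf("%u registers used, %d bytes passed on the stack\n", NumRegs,
              std::max(Size, 0));
  return 0;
}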

4679

4682 bool isFPCmp,

4683 unsigned Opc) const {

4685 "Subtarget already supports SELECT nodes with the use of"

4686 "conditional-move instructions.");

4687

4691

4692

4693

4694

4695

4698

4699

4700

4701

4702

4703

4704

4709 F->insert(It, copy0MBB);

4710 F->insert(It, sinkMBB);

4711

4712

4716

4717

4720

4721 if (isFPCmp) {

4722

4724 .addReg(MI.getOperand(1).getReg())

4726 } else {

4727

4729 .addReg(MI.getOperand(1).getReg())

4732 }

4733

4734

4735

4736

4737 BB = copy0MBB;

4738

4739

4741

4742

4743

4744

4745 BB = sinkMBB;

4746

4747 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())

4748 .addReg(MI.getOperand(2).getReg())

4750 .addReg(MI.getOperand(3).getReg())

4752

4753 MI.eraseFromParent();

4754

4755 return BB;

4756}
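// Standalone sketch (plain C++, not MachineIR) of the diamond the custom
// inserter builds above when conditional moves are unavailable: a conditional
// branch skips the "false" block, and the join block's PHI picks whichever
// value survived. Names are illustrative only.
#include <cstdio>

static int selectViaBranches(bool Cond, int TrueVal, int FalseVal) {
  // thisMBB: both candidate values are already live here; branch to the join
  // block when the condition holds, skipping the fall-through block.
  int Result = TrueVal;
  if (!Cond) {
    // copy0MBB: reached only on the fall-through (condition-false) path.
    Result = FalseVal;
  }
  // sinkMBB: Result stands in for the PHI merging the two predecessors.
  return Result;
}

int main() {
  std::printf("%d %d\n", selectViaBranches(true, 10, 20),
              selectViaBranches(false, 10, 20));
  return 0;
}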

4757

4759MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,

4762 "Subtarget already supports SELECT nodes with the use of"

4763 "conditional-move instructions.");

4764

4767

4768

4769

4770

4771

4772

4775

4776

4777

4778

4779

4780

4781

4786 F->insert(It, copy0MBB);

4787 F->insert(It, sinkMBB);

4788

4789

4793

4794

4797

4798

4800 .addReg(MI.getOperand(2).getReg())

4803

4804

4805

4806

4807 BB = copy0MBB;

4808

4809

4811

4812

4813

4814

4815 BB = sinkMBB;

4816

4817

4818 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())

4819 .addReg(MI.getOperand(3).getReg())

4821 .addReg(MI.getOperand(5).getReg())

4823 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg())

4824 .addReg(MI.getOperand(4).getReg())

4826 .addReg(MI.getOperand(6).getReg())

4828

4829 MI.eraseFromParent();

4830

4831 return BB;

4832}

4833

4834

4835

4839

4842 .Case("$28", Mips::GP_64)

4843 .Case("sp", Mips::SP_64)

4845 if (Reg)

4846 return Reg;

4847 } else {

4849 .Case("$28", Mips::GP)

4850 .Case("sp", Mips::SP)

4852 if (Reg)

4853 return Reg;

4854 }

4856}

4857

4865

4866 Register Dest = MI.getOperand(0).getReg();

4868 unsigned Imm = MI.getOperand(2).getImm();

4869

4871

4873

4874 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4880 } else {

4881

4882

4883 Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4884 Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4885 Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4890 .addImm(Imm + (IsLittle ? 0 : 3))

4895 .addImm(Imm + (IsLittle ? 3 : 0))

4898 }

4899

4900 MI.eraseFromParent();

4901 return BB;

4902}
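// Standalone sketch (plain C++, not the LWR/LWL pair emitted above) of what an
// unaligned 32-bit load has to produce: the four bytes at an arbitrary address
// combined into one register value in the target's byte order. The hardware
// sequence above achieves this with two overlapping partial-word loads.
#include <cstdint>
#include <cstdio>

static uint32_t loadUnalignedWord(const uint8_t *Addr, bool IsLittle) {
  uint32_t Val = 0;
  for (unsigned B = 0; B < 4; ++B)
    Val |= uint32_t(Addr[B]) << (IsLittle ? 8 * B : 8 * (3 - B));
  return Val;
}

int main() {
  uint8_t Buf[5] = {0x00, 0x11, 0x22, 0x33, 0x44};
  std::printf("%08x %08x\n", (unsigned)loadUnalignedWord(Buf + 1, true),
              (unsigned)loadUnalignedWord(Buf + 1, false));
  return 0;
}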

4903

4911

4912 Register Dest = MI.getOperand(0).getReg();

4914 unsigned Imm = MI.getOperand(2).getImm();

4915

4917

4919

4921 Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);

4927 } else {

4928 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);

4929 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4930 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4934 .addImm(Imm + (IsLittle ? 0 : 4));

4938 .addImm(Imm + (IsLittle ? 4 : 0));

4940 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)

4944 }

4945 } else {

4946

4947

4948 Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4949 Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4950 Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4951 Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4952 Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4953 Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);

4954 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);

4959 .addImm(Imm + (IsLittle ? 0 : 7))

4964 .addImm(Imm + (IsLittle ? 3 : 4))

4970 .addImm(Imm + (IsLittle ? 4 : 3))

4975 .addImm(Imm + (IsLittle ? 7 : 0))

4978 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)

4982 }

4983

4984 MI.eraseFromParent();

4985 return BB;

4986}

4987

4995

4996 Register StoreVal = MI.getOperand(0).getReg();

4998 unsigned Imm = MI.getOperand(2).getImm();

4999

5001

5003

5004 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);

5005 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);

5015 } else {

5016

5017

5018 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);

5026 .addImm(Imm + (IsLittle ? 0 : 3));

5030 .addImm(Imm + (IsLittle ? 3 : 0));

5031 }

5032

5033 MI.eraseFromParent();

5034

5035 return BB;

5036}

5037

5045

5046 Register StoreVal = MI.getOperand(0).getReg();

5048 unsigned Imm = MI.getOperand(2).getImm();

5049

5051

5053

5055 Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);

5056 Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);

5068 } else {

5069 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);

5070 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);

5071 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);

5086 .addImm(Imm + (IsLittle ? 0 : 4));

5090 .addImm(Imm + (IsLittle ? 4 : 0));

5091 }

5092 } else {

5093

5094

5095 Register Bitcast = MRI.createVirtualRegister(&Mips::MSA128WRegClass);

5096 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);

5097 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);

5110 .addImm(Imm + (IsLittle ? 0 : 3));

5114 .addImm(Imm + (IsLittle ? 3 : 0));

5118 .addImm(Imm + (IsLittle ? 4 : 7));

5122 .addImm(Imm + (IsLittle ? 7 : 4));

5123 }

5124

5125 MI.eraseFromParent();

5126 return BB;

5127}
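// Standalone sketch (plain C++, not MachineIR) of the 64-bit store splitting
// above: the low and high 32-bit halves are written as two word stores whose
// offsets swap with endianness (Imm + 0/4), so the memory image matches what a
// single 64-bit store would have produced.
#include <cstdint>
#include <cstdio>

static void storeDoubleword(uint8_t *Base, unsigned Imm, uint64_t Val,
                            bool IsLittle) {
  uint32_t Lo = uint32_t(Val);         // low 32 bits
  uint32_t Hi = uint32_t(Val >> 32);   // high 32 bits
  unsigned LoOff = Imm + (IsLittle ? 0 : 4);
  unsigned HiOff = Imm + (IsLittle ? 4 : 0);
  // Each word is itself written in the target's byte order.
  for (unsigned B = 0; B < 4; ++B) {
    Base[LoOff + B] = uint8_t(Lo >> (IsLittle ? 8 * B : 8 * (3 - B)));
    Base[HiOff + B] = uint8_t(Hi >> (IsLittle ? 8 * B : 8 * (3 - B)));
  }
}

int main() {
  uint8_t Mem[8] = {};
  storeDoubleword(Mem, 0, 0x1122334455667788ULL, /*IsLittle=*/false);
  for (unsigned B = 0; B < 8; ++B)
    std::printf("%02x ", Mem[B]);      // big-endian: 11 22 33 44 55 66 77 88
  std::printf("\n");
  return 0;
}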
