LLVM: lib/Target/XCore/XCoreISelLowering.cpp Source File


32#include "llvm/IR/IntrinsicsXCore.h"

37#include <algorithm>

38

39using namespace llvm;

40

41#define DEBUG_TYPE "xcore-lower"

42

45 : TargetLowering(TM, Subtarget), TM(TM), Subtarget(Subtarget) {

46

47

49

50

52

54

56

57

60

61

64

65

75

76

81

83

84

86

89

90

92

93

98

101 }

102

103

106

107

112

113

117

118

121

123

124

127

128

130

134

135

138

141

142

144}

145

147 if (Val.getOpcode() != ISD::LOAD)

148 return false;

149

153 return false;

154

156 default: break;

157 case MVT::i8:

158 return true;

159 }

160

161 return false;

162}

163

166 switch (Op.getOpcode())

167 {

172 case ISD::BR_JT: return LowerBR_JT(Op, DAG);

173 case ISD::LOAD: return LowerLOAD(Op, DAG);

174 case ISD::STORE: return LowerSTORE(Op, DAG);

175 case ISD::VAARG: return LowerVAARG(Op, DAG);

176 case ISD::VASTART: return LowerVASTART(Op, DAG);

179

181 case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);

185 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);

186 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);

188 case ISD::ATOMIC_FENCE:

189 return LowerATOMIC_FENCE(Op, DAG);

190 default:

192 }

193}

194

195

196

200 switch (N->getOpcode()) {

201 default:

205 Results.push_back(ExpandADDSUB(N, DAG));

206 return;

207 }

208}

209

210

211

212

213

214SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,

217

219

221 return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

222

226 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

227

228 return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);

229}

230

233 return true;

234

237 return false;

238

240 unsigned ObjSize = DL.getTypeAllocSize(ObjType);

242}
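// Editor's sketch of the IsSmallObject test above (not part of the original
// source): with a small code model every global is treated as reachable
// directly; otherwise only sized globals below CodeModelLargeSize qualify for
// DP/CP-relative addressing. The 256-byte threshold is an assumption about the
// value of the CodeModelLargeSize constant in this backend.
#include <cstdint>

static constexpr unsigned kCodeModelLargeSizeSketch = 256; // assumed value

static bool isSmallObjectSketch(bool SmallCodeModel, bool TypeIsSized,
                                uint64_t ObjSize) {
  if (SmallCodeModel)
    return true;   // small code model: everything is "small"
  if (!TypeIsSized)
    return false;  // unsized types cannot be placed in the small data area
  return ObjSize != 0 && ObjSize < kCodeModelLargeSizeSketch;
}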

243

244SDValue XCoreTargetLowering::

246{

248 const GlobalValue *GV = GN->getGlobal();

249 SDLoc DL(GN);

252

253 int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);

255 GA = getGlobalAddressWrapper(GA, GV, DAG);

256

257 if (Offset != FoldedOffset) {

261 }

262 return GA;

263 } else {

264

271 DAG.getEntryNode(), CP, MachinePointerInfo());

272 }

273}

274

275SDValue XCoreTargetLowering::

277{

278 SDLoc DL(Op);

282

283 return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);

284}

285

286SDValue XCoreTargetLowering::

288{

290

291 SDLoc dl(CP);

292 EVT PtrVT = Op.getValueType();

294 if (CP->isMachineConstantPoolEntry()) {

296 CP->getAlign(), CP->getOffset());

297 } else {

299 CP->getOffset());

300 }

301 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);

302}

303

307

308SDValue XCoreTargetLowering::

310{

311 SDValue Chain = Op.getOperand(0);

312 SDValue Table = Op.getOperand(1);

313 SDValue Index = Op.getOperand(2);

316 unsigned JTI = JT->getIndex();

320

321 unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();

322 if (NumEntries <= 32) {

323 return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);

324 }

325 assert((NumEntries >> 31) == 0);

328 return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,

329 ScaledIndex);

330}
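// Editor's sketch (not in the original) of the index handling above: tables
// with at most 32 entries feed the index straight to BR_JT, while larger
// tables shift it left by one for the BR_JT32 form, matching the shift-by-one
// node built for ScaledIndex.
#include <cstdint>

static uint32_t jumpTableIndexSketch(uint32_t Index, unsigned NumEntries) {
  if (NumEntries <= 32)
    return Index;    // BR_JT: index used directly
  return Index << 1; // BR_JT32: scaled index
}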

331

332SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(

336 if ((Offset & 0x3) == 0) {

337 return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());

338 }

339

341 int32_t LowOffset = HighOffset - 4;

342 SDValue LowAddr, HighAddr;

343 if (GlobalAddressSDNode *GASD =

346 LowOffset);

348 HighOffset);

349 } else {

354 }

357

358 SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());

359 SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());

364 High.getValue(1));

367}
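// Editor's scalar sketch of the routine above (assumes a little-endian target,
// as the XCore is): a word at Base+Offset whose offset is not 4-byte aligned
// is rebuilt from the two aligned words that enclose it, using the same
// HighOffset/LowOffset arithmetic as the DAG nodes constructed above.
#include <cstdint>
#include <cstring>

static uint32_t loadWordFromAlignedBaseSketch(const uint8_t *Base, int32_t Offset) {
  if ((Offset & 0x3) == 0) {              // already aligned: a single word load
    uint32_t W;
    std::memcpy(&W, Base + Offset, 4);
    return W;
  }
  int32_t HighOffset = (Offset + 3) & ~3; // next 4-byte boundary
  int32_t LowOffset = HighOffset - 4;     // previous 4-byte boundary
  uint32_t Low, High;
  std::memcpy(&Low, Base + LowOffset, 4); // both loads are word aligned
  std::memcpy(&High, Base + HighOffset, 4);
  unsigned LowShift = (Offset - LowOffset) * 8;
  unsigned HighShift = (HighOffset - Offset) * 8;
  return (Low >> LowShift) | (High << HighShift);
}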

368

374

380 "Unexpected extension type");

381 assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");

382

384 LD->getMemoryVT(), *LD->getMemOperand()))

386

389 SDLoc DL(Op);

390

391 if (!LD->isVolatile()) {

392 const GlobalValue *GV;

398 return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,

400 }

404 BasePtr->getValueType(0));

405 return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,

407 }

408 }

409

410 if (LD->getAlign() == Align(2)) {

412 LD->getPointerInfo(), MVT::i16, Align(2),

413 LD->getMemOperand()->getFlags());

418 LD->getPointerInfo().getWithOffset(2), MVT::i16,

419 Align(2), LD->getMemOperand()->getFlags());

424 High.getValue(1));

427 }

428

429

432 Args.emplace_back(BasePtr, IntPtrTy);

433

434 TargetLowering::CallLoweringInfo CLI(DAG);

435 CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(

439 std::move(Args));

440

441 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

442 SDValue Ops[] = { CallResult.first, CallResult.second };

444}
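// Editor's sketch of the Align(2) path above (little-endian assumed): a 4-byte
// load that is only 2-byte aligned is split into two extending i16 loads that
// are recombined with a shift and an or, mirroring the two getExtLoad calls
// above. Loads with no usable alignment fall back to the runtime helper call
// built via LowerCallTo instead.
#include <cstdint>
#include <cstring>

static uint32_t loadWordAlign2Sketch(const uint8_t *P) {
  uint16_t Lo, Hi;
  std::memcpy(&Lo, P, 2);     // 16-bit load at P
  std::memcpy(&Hi, P + 2, 2); // 16-bit load at P + 2
  return (uint32_t)Lo | ((uint32_t)Hi << 16);
}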

445

449 assert(ST->isTruncatingStore() && "Unexpected store type");

450 assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");

451

453 ST->getMemoryVT(), *ST->getMemOperand()))

455

459 SDLoc dl(Op);

460

461 if (ST->getAlign() == Align(2)) {

467 MVT::i16, Align(2), ST->getMemOperand()->getFlags());

471 Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),

472 MVT::i16, Align(2), ST->getMemOperand()->getFlags());

474 }

475

476

479 Args.emplace_back(BasePtr, IntPtrTy);

480 Args.emplace_back(Value, IntPtrTy);

481

482 TargetLowering::CallLoweringInfo CLI(DAG);

483 CLI.setDebugLoc(dl).setChain(Chain).setCallee(

487 std::move(Args));

488

489 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

490 return CallResult.second;

491}
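// Editor's sketch of the matching Align(2) store path above (little-endian
// assumed): a 4-byte store with only 2-byte alignment becomes two truncating
// i16 stores; anything less aligned goes through the runtime helper call.
#include <cstdint>
#include <cstring>

static void storeWordAlign2Sketch(uint8_t *P, uint32_t V) {
  uint16_t Lo = (uint16_t)V;         // low half: truncstore i16 at P
  uint16_t Hi = (uint16_t)(V >> 16); // high half: truncstore i16 at P + 2
  std::memcpy(P, &Lo, 2);
  std::memcpy(P + 2, &Hi, 2);
}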

492

493SDValue XCoreTargetLowering::

495{

497 "Unexpected operand to lower!");

498 SDLoc dl(Op);

503 DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,

508}

509

510SDValue XCoreTargetLowering::

512{

514 "Unexpected operand to lower!");

515 SDLoc dl(Op);

521 Zero, Zero);

525}

526

527

528

529

530

531

532static bool

534 SDValue &Addend1, bool requireIntermediatesHaveOneUse)

535{

537 return false;

543 AddOp = N0;

544 OtherOp = N1;

546 AddOp = N1;

547 OtherOp = N0;

548 } else {

549 return false;

550 }

551 if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())

552 return false;

554

555 if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())

556 return false;

561 return true;

562 }

563 if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {

564

565 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())

566 return false;

570 Addend1 = OtherOp;

571 return true;

572 }

573 if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {

574

575 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())

576 return false;

580 Addend1 = OtherOp;

581 return true;

582 }

583 return false;

584}

585

586SDValue XCoreTargetLowering::

588{

591 if (N->getOperand(0).getOpcode() == ISD::MUL) {

592 Mul = N->getOperand(0);

593 Other = N->getOperand(1);

594 } else if (N->getOperand(1).getOpcode() == ISD::MUL) {

595 Mul = N->getOperand(1);

596 Other = N->getOperand(0);

597 } else {

599 }

600 SDLoc dl(N);

601 SDValue LL, RL, AddendL, AddendH;

603 Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));

605 Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));

615

617 DAG.getVTList(MVT::i32, MVT::i32), AddendH,

618 AddendL, LL, RL);

621 }

622 if (LHSSB > 32 && RHSSB > 32) {

623

625 DAG.getVTList(MVT::i32, MVT::i32), AddendH,

626 AddendL, LL, RL);

629 }

632 Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));

634 Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));

636 DAG.getVTList(MVT::i32, MVT::i32), AddendH,

637 AddendL, LL, RL);

644}
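// Editor's sketch (not in the original) of the multiply-accumulate semantics
// assumed above: the node produces the {high, low} words of
// AddendH:AddendL + X * Y, i.e. a 32x32->64 multiply added into a 64-bit
// accumulator split across two i32 values, which is what lets a 64-bit
// add-of-multiply collapse into a single node.
#include <cstdint>
#include <utility>

static std::pair<uint32_t, uint32_t>
maccSketch(uint32_t AddendH, uint32_t AddendL, uint32_t X, uint32_t Y) {
  uint64_t Acc = ((uint64_t)AddendH << 32) | AddendL;
  uint64_t R = Acc + (uint64_t)X * Y;        // wraps modulo 2^64 in this sketch
  return {(uint32_t)(R >> 32), (uint32_t)R}; // {high word, low word}
}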

645

646SDValue XCoreTargetLowering::

648{

649 assert(N->getValueType(0) == MVT::i64 &&

651 "Unknown operand to lower!");

652

654 if (SDValue Result = TryExpandADDWithMul(N, DAG))

656

657 SDLoc dl(N);

658

659

661 N->getOperand(0),

664 N->getOperand(0),

667 N->getOperand(1),

670 N->getOperand(1),

672

673

674 unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :

675 XCoreISD::LSUB;

678 LHSL, RHSL, Zero);

680

682 LHSH, RHSH, Carry);

683 SDValue Ignored(Hi.getNode(), 1);

684

686}
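// Editor's scalar sketch of the expansion above: a 64-bit add (or sub) is
// built from two 32-bit long-add (long-sub) steps, with the carry (borrow)
// produced by the low half feeding the high half, as in the LADD/LSUB chain
// constructed above.
#include <cstdint>
#include <utility>

// Returns {sum, carry-out}, mirroring the two results of XCoreISD::LADD.
static std::pair<uint32_t, uint32_t> laddSketch(uint32_t A, uint32_t B,
                                                uint32_t CarryIn) {
  uint64_t Wide = (uint64_t)A + B + CarryIn;
  return {(uint32_t)Wide, (uint32_t)(Wide >> 32)};
}

static uint64_t add64Sketch(uint64_t L, uint64_t R) {
  auto [Lo, Carry] = laddSketch((uint32_t)L, (uint32_t)R, /*CarryIn=*/0);
  auto [Hi, CarryOut] = laddSketch((uint32_t)(L >> 32), (uint32_t)(R >> 32), Carry);
  (void)CarryOut; // ignored, like the second result of the high LADD above
  return ((uint64_t)Hi << 32) | Lo;
}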

687

688SDValue XCoreTargetLowering::

690{

691

692

693 SDNode *Node = Op.getNode();

694 EVT VT = Node->getValueType(0);

699 SDLoc dl(Node);

701 DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));

702

705 dl));

706

707 InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,

708 MachinePointerInfo(SV));

709

710 return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());

711}
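// Editor's sketch of the va_arg lowering above, assuming the target's va_list
// is a single pointer (as it is here): load the current argument pointer,
// store back the pointer bumped by the argument size, then load the value.
#include <cstring>

static int vaArgI32Sketch(char **VAListPtr) {
  char *VAList = *VAListPtr;            // load the current argument pointer
  char *NextPtr = VAList + sizeof(int); // advance by the argument size
  *VAListPtr = NextPtr;                 // store the updated pointer back
  int Value;
  std::memcpy(&Value, VAList, sizeof(int));
  return Value;                         // load the argument itself
}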

712

713SDValue XCoreTargetLowering::

715{

716 SDLoc dl(Op);

717

718

720 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

722 return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),

723 MachinePointerInfo());

724}

725

728

729

730

731

732

733 if (Op.getConstantOperandVal(0) > 0)

735

737 const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();

740}

741

742SDValue XCoreTargetLowering::

744

745

746

747

748

749 if (Op.getConstantOperandVal(0) > 0)

751

753 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

759}

760

761SDValue XCoreTargetLowering::

763

764

765

766

767 return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);

768}

769

770SDValue XCoreTargetLowering::

772

773

774

775

777 SDValue Chain = Op.getOperand(0);

779 SDValue Handler = Op.getOperand(2);

780 SDLoc dl(Op);

781

782

783 const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();

786 SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,

787 MVT::i32);

790

791

792

793 unsigned StackReg = XCore::R2;

794 unsigned HandlerReg = XCore::R3;

795

797 DAG.getCopyToReg(Chain, dl, StackReg, Stack),

798 DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)

799 };

800

802

803 return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,

806

807}

808

809SDValue XCoreTargetLowering::

811 return Op.getOperand(0);

812}

813

814SDValue XCoreTargetLowering::

816 SDValue Chain = Op.getOperand(0);

817 SDValue Trmp = Op.getOperand(1);

818 SDValue FPtr = Op.getOperand(2);

819 SDValue Nest = Op.getOperand(3);

820

822

823

824

825

826

827

828

829

830

831

832

833

835

837

838 SDLoc dl(Op);

839 OutChains[0] =

841 MachinePointerInfo(TrmpAddr));

842

845 OutChains[1] =

847 MachinePointerInfo(TrmpAddr, 4));

848

851 OutChains[2] =

853 MachinePointerInfo(TrmpAddr, 8));

854

857 OutChains[3] =

858 DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));

859

862 OutChains[4] =

863 DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));

864

866}
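// Editor's layout sketch of the 20-byte trampoline written above, derived from
// the five stores and their MachinePointerInfo offsets; the three instruction
// words are left opaque here because their encodings are not shown in this
// listing.
#include <cstdint>

struct TrampolineLayoutSketch {
  uint32_t Insn[3]; // offsets 0, 4, 8: code that materializes Nest and branches to FPtr
  uint32_t Nest;    // offset 12: the static chain value
  uint32_t FPtr;    // offset 16: the real function's address
};
static_assert(sizeof(TrampolineLayoutSketch) == 20, "five word-sized stores");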

867

868SDValue XCoreTargetLowering::

870 SDLoc DL(Op);

871 unsigned IntNo = Op.getConstantOperandVal(0);

872 switch (IntNo) {

873 case Intrinsic::xcore_crc8:

874 EVT VT = Op.getValueType();

877 Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));

881 }

883}

884

885SDValue XCoreTargetLowering::

887 SDLoc DL(Op);

888 return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));

889}

890

891

892

893

894

895#include "XCoreGenCallingConv.inc"

896

897

898

899

900

901

905 SelectionDAG &DAG = CLI.DAG;

906 SDLoc &dl = CLI.DL;

907 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;

908 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

909 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;

914 bool isVarArg = CLI.IsVarArg;

915

916

917 isTailCall = false;

918

919

920 switch (CallConv)

921 {

922 default:

926 return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,

927 Outs, OutVals, Ins, dl, DAG, InVals);

928 }

929}

930

931

932

938

940 if (VA.isRegLoc()) {

941 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),

945 } else {

946 assert(VA.isMemLoc());

947 ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),

948 InVals.size()));

949

951 }

952 }

953

954

956 for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {

957 int offset = ResultMemLocs[i].first;

958 unsigned index = ResultMemLocs[i].second;

962 InVals[index] = load;

964 }

965

966

967

968 if (!MemOpChains.empty())

970

971 return Chain;

972}

973

974

975

976

977

978SDValue XCoreTargetLowering::LowerCCCCallTo(

984

985

987 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,

989

990

991

992 CCInfo.AllocateStack(4, Align(4));

993

994 CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

995

997

998 CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,

1000 RetCCInfo.AllocateStack(CCInfo.getStackSize(), Align(4));

1001 RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

1002

1003

1004 unsigned NumBytes = RetCCInfo.getStackSize();

1005

1007

1010

1011

1012 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

1013 CCValAssign &VA = ArgLocs[i];

1014 SDValue Arg = OutVals[i];

1015

1016

1022 break;

1025 break;

1028 break;

1029 }

1030

1031

1032

1035 } else {

1037

1039

1040 MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,

1041 Chain, Arg,

1043 MVT::i32)));

1044 }

1045 }

1046

1047

1048

1049 if (!MemOpChains.empty())

1051

1052

1053

1054

1055

1057 for (const auto &[Reg, N] : RegsToPass) {

1060 }

1061

1062

1063

1064

1069

1070

1071

1072

1073

1074 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

1076 Ops.push_back(Chain);

1077 Ops.push_back(Callee);

1078

1079

1080

1081 for (const auto &[Reg, N] : RegsToPass)

1083

1085 Ops.push_back(InGlue);

1086

1087 Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);

1089

1090

1091 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, dl);

1093

1094

1095

1096 return LowerCallResult(Chain, InGlue, RVLocs, dl, DAG, InVals);

1097}

1098

1099

1100

1101

1102

1103namespace {

1104 struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };

1105}

1106

1107

1108SDValue XCoreTargetLowering::LowerFormalArguments(

1112 switch (CallConv)

1113 {

1114 default:

1118 return LowerCCCArguments(Chain, CallConv, isVarArg,

1119 Ins, dl, DAG, InVals);

1120 }

1121}

1122

1123

1124

1125

1126

1127SDValue XCoreTargetLowering::LowerCCCArguments(

1133 MachineRegisterInfo &RegInfo = MF.getRegInfo();

1134 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

1135

1136

1138 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,

1140

1141 CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

1142

1144

1145 unsigned LRSaveSize = StackSlotSize;

1146

1147 if (!isVarArg)

1149

1150

1151

1152

1153

1154

1155

1156

1160

1161

1162 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

1163

1164 CCValAssign &VA = ArgLocs[i];

1166

1168

1171 default:

1172 {

1173#ifndef NDEBUG

1174 errs() << "LowerFormalArguments Unhandled argument type: "

1175 << RegVT << "\n";

1176#endif

1178 }

1179 case MVT::i32:

1184 }

1185 } else {

1186

1188

1190 if (ObjSize > StackSlotSize) {

1191 errs() << "LowerFormalArguments Unhandled argument type: "

1193 }

1194

1197 true);

1198

1199

1200

1204 }

1205 const ArgDataPair ADP = { ArgIn, Ins[i].Flags };

1207 }

1208

1209

1210 if (isVarArg) {

1211

1212 static const MCPhysReg ArgRegs[] = {

1213 XCore::R0, XCore::R1, XCore::R2, XCore::R3

1214 };

1215 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

1216 unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);

1217 if (FirstVAReg < std::size(ArgRegs)) {

1218 int offset = 0;

1219

1220

1221 for (int i = std::size(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {

1222

1224 if (i == (int)FirstVAReg) {

1226 }

1227 offset -= StackSlotSize;

1229

1231 RegInfo.addLiveIn(ArgRegs[i], VReg);

1234

1236 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());

1238 }

1239 } else {

1240

1242 MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getStackSize(), true));

1243 }

1244 }

1245

1246

1247 if (!CFRegNode.empty())

1249

1250

1251

1252

1253

1254 for (const ArgDataPair &ArgDI : ArgData) {

1255 if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {

1256 unsigned Size = ArgDI.Flags.getByValSize();

1257 Align Alignment =

1258 std::max(Align(StackSlotSize), ArgDI.Flags.getNonZeroByValAlign());

1259

1264 Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32),

1265 Alignment, false, false, nullptr, std::nullopt,

1266 MachinePointerInfo(), MachinePointerInfo()));

1267 } else {

1269 }

1270 }

1271

1272

1273 if (!MemOps.empty()) {

1276 }

1277

1278 return Chain;

1279}

1280

1281

1282

1283

1284

1285bool XCoreTargetLowering::

1287 bool isVarArg,

1291 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);

1292 if (!CCInfo.CheckReturn(Outs, RetCC_XCore))

1293 return false;

1294 if (CCInfo.getStackSize() != 0 && isVarArg)

1295 return false;

1296 return true;

1297}

1298

1301 bool isVarArg,

1305

1306 XCoreFunctionInfo *XFI =

1309

1310

1311

1313

1314

1315 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,

1317

1318

1319 if (!isVarArg)

1321

1322 CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

1323

1326

1327

1328 RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));

1329

1331

1332 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {

1333 CCValAssign &VA = RVLocs[i];

1335 continue;

1337 if (isVarArg) {

1338 report_fatal_error("Can't return value from vararg function in memory");

1339 }

1340

1343

1344 int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

1345

1346

1347

1350 Chain, dl, OutVals[i], FIN,

1352 }

1353

1354

1355

1356 if (!MemOpChains.empty())

1358

1359

1360 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {

1361 CCValAssign &VA = RVLocs[i];

1363 continue;

1364

1366

1367

1368

1371 }

1372

1373 RetOps[0] = Chain;

1374

1375

1377 RetOps.push_back(Glue);

1378

1379 return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);

1380}

1381

1382

1383

1384

1385

1391 assert((MI.getOpcode() == XCore::SELECT_CC) &&

1392 "Unexpected instr type to insert");

1393

1394

1395

1396

1397

1400

1401

1402

1403

1404

1405

1406

1411 F->insert(It, copy0MBB);

1412 F->insert(It, sinkMBB);

1413

1414

1418

1419

1422

1423 BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))

1424 .addReg(MI.getOperand(1).getReg())

1426

1427

1428

1429

1430 BB = copy0MBB;

1431

1432

1434

1435

1436

1437

1438 BB = sinkMBB;

1439 BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())

1440 .addReg(MI.getOperand(3).getReg())

1442 .addReg(MI.getOperand(2).getReg())

1444

1445 MI.eraseFromParent();

1446 return BB;

1447}
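// Editor's sketch of what the SELECT_CC diamond above computes: the BRFT
// branch skips the copy0 block when the condition register is true, and the
// PHI in the sink block merges the two incoming values.
static int selectCCSketch(int Cond, int TrueVal, int FalseVal) {
  int Result = TrueVal; // value on the edge that branches straight to sinkMBB
  if (!Cond)
    Result = FalseVal;  // copy0MBB path, taken when the condition is false
  return Result;        // sinkMBB: PHI of the two values
}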

1448

1449

1450

1451

1452

1453SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,

1454 DAGCombinerInfo &DCI) const {

1457 switch (N->getOpcode()) {

1458 default: break;

1460 switch (N->getConstantOperandVal(1)) {

1461 case Intrinsic::xcore_outt:

1462 case Intrinsic::xcore_outct:

1463 case Intrinsic::xcore_chkct: {

1464 SDValue OutVal = N->getOperand(3);

1465

1471 !DCI.isBeforeLegalizeOps());

1475 DCI.CommitTargetLoweringOpt(TLO);

1476 }

1477 break;

1478 }

1479 case Intrinsic::xcore_setpt: {

1480 SDValue Time = N->getOperand(3);

1481

1485 KnownBits Known;

1486 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),

1487 !DCI.isBeforeLegalizeOps());

1491 DCI.CommitTargetLoweringOpt(TLO);

1492 }

1493 break;

1494 }

1495 }

1496 break;

1497 case XCoreISD::LADD: {

1498 SDValue N0 = N->getOperand(0);

1499 SDValue N1 = N->getOperand(1);

1500 SDValue N2 = N->getOperand(2);

1504

1505

1506 if (N0C && !N1C)

1507 return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

1508

1509

1510 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {

1516 }

1517

1518

1519

1520 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {

1524 if ((Known.Zero & Mask) == Mask) {

1529 }

1530 }

1531 }

1532 break;

1533 case XCoreISD::LSUB: {

1540

1541

1542 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {

1546 if ((Known.Zero & Mask) == Mask) {

1552 }

1553 }

1554

1555

1556

1557 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {

1561 if ((Known.Zero & Mask) == Mask) {

1566 }

1567 }

1568 }

1569 break;

1570 case XCoreISD::LMUL: {

1578

1579

1580 if ((N0C && !N1C) ||

1582 return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),

1583 N1, N0, N2, N3);

1584

1585

1586 if (N1C && N1C->isZero()) {

1587

1588 if (N->hasNUsesOfValue(0, 0)) {

1592 }

1593

1595 DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);

1599 }

1600 }

1601 break;

1603

1604

1605

1606

1607 SDValue Mul0, Mul1, Addend0, Addend1;

1608 if (N->getValueType(0) == MVT::i32 &&

1611 DAG.getVTList(MVT::i32, MVT::i32), Mul0,

1612 Mul1, Addend0, Addend1);

1615 }

1617

1618

1619

1620

1621 if (N->getValueType(0) == MVT::i64 &&

1628 Mul0, DAG.getConstant(0, dl, MVT::i32));

1630 Mul1, DAG.getConstant(0, dl, MVT::i32));

1632 Addend0, DAG.getConstant(0, dl, MVT::i32));

1634 Addend1, DAG.getConstant(0, dl, MVT::i32));

1636 DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,

1637 Addend0L, Addend1L);

1640 }

1641 }

1642 break;

1643 case ISD::STORE: {

1644

1646 if (!DCI.isBeforeLegalize() ||

1648 ST->getMemoryVT(),

1649 *ST->getMemOperand()) ||

1650 ST->isVolatile() || ST->isIndexed()) {

1651 break;

1652 }

1654

1655 unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();

1656 assert((StoreBits % 8) == 0 &&

1657 "Store size in bits must be a multiple of 8");

1658 Align Alignment = ST->getAlign();

1659

1661 if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&

1662 LD->getAlign() == Alignment &&

1663 !LD->isVolatile() && !LD->isIndexed() &&

1666 return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),

1667 DAG.getConstant(StoreBits / 8, dl, MVT::i32),

1668 Alignment, false, nullptr, isTail,

1669 ST->getPointerInfo(), LD->getPointerInfo());

1670 }

1671 }

1672 break;

1673 }

1674 }

1676}

1677

1678void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,

1680 const APInt &DemandedElts,

1682 unsigned Depth) const {

1684 switch (Op.getOpcode()) {

1685 default: break;

1686 case XCoreISD::LADD:

1687 case XCoreISD::LSUB:

1688 if (Op.getResNo() == 1) {

1689

1692 }

1693 break;

1695 {

1696 unsigned IntNo = Op.getConstantOperandVal(1);

1697 switch (IntNo) {

1698 case Intrinsic::xcore_getts:

1699

1700 Known.Zero =

1702 break;

1703 case Intrinsic::xcore_int:

1704 case Intrinsic::xcore_inct:

1705

1706 Known.Zero =

1708 break;

1709 case Intrinsic::xcore_testct:

1710

1711 Known.Zero =

1713 break;

1714 case Intrinsic::xcore_testwct:

1715

1716 Known.Zero =

1718 break;

1719 }

1720 }

1721 break;

1722 }

1723}

1724

1725

1726

1727

1728

1730{

1731 return (val >= 0 && val <= 11);

1732}

1733

1735{

1736 return (val%2 == 0 && isImmUs(val/2));

1737}

1738

1740{

1741 return (val%4 == 0 && isImmUs(val/4));

1742}
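// Editor's worked examples for the predicates above: each accepts a scaled
// unsigned immediate of 0..11 units, where the unit is the access size, so
// byte accesses reach offsets 0..11, halfword accesses 0..22 (even only), and
// word accesses 0..44 (multiples of four only).
#include <cassert>

static void immediateRangeExamples() {
  assert(isImmUs(11) && !isImmUs(12) && !isImmUs(-1));
  assert(isImmUs2(22) && !isImmUs2(7) && !isImmUs2(24));
  assert(isImmUs4(44) && !isImmUs4(6) && !isImmUs4(48));
}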

1743

1744

1745

1748 unsigned AS,

1752

1753 unsigned Size = DL.getTypeAllocSize(Ty);

1757 }

1758

1759 switch (Size) {

1760 case 1:

1761

1762 if (AM.Scale == 0) {

1764 }

1765

1767 case 2:

1768 case 3:

1769

1770 if (AM.Scale == 0) {

1772 }

1773

1775 default:

1776

1777 if (AM.Scale == 0) {

1779 }

1780

1782 }

1783}

1784

1785

1786

1787

1788

1789std::pair<unsigned, const TargetRegisterClass *>

1792 MVT VT) const {

1793 if (Constraint.size() == 1) {

1794 switch (Constraint[0]) {

1795 default : break;

1796 case 'r':

1797 return std::make_pair(0U, &XCore::GRRegsRegClass);

1798 }

1799 }

1800

1801

1803}


static SDValue LowerCallResult(SDValue Chain, SDValue InGlue, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)

LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...

Definition XCoreISelLowering.cpp:933

static bool isImmUs(int64_t val)

Definition XCoreISelLowering.cpp:1729

static bool isImmUs4(int64_t val)

Definition XCoreISelLowering.cpp:1739

static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL)

Definition XCoreISelLowering.cpp:231

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)

Definition XCoreISelLowering.cpp:369

static bool isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0, SDValue &Addend1, bool requireIntermediatesHaveOneUse)

isADDADDMUL - Return whether Op is in a form that is equivalent to add(add(mul(x,y),...

Definition XCoreISelLowering.cpp:533

static bool isImmUs2(int64_t val)

Definition XCoreISelLowering.cpp:1734

Class for arbitrary precision integers.

static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)

Constructs an APInt value that has the bottom loBitsSet bits set.

static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)

Constructs an APInt value that has the top hiBitsSet bits set.

LLVM Basic Block Representation.

CCValAssign - Represent assignment of one arg/retval to a location.

Register getLocReg() const

LocInfo getLocInfo() const

int64_t getLocMemOffset() const

static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)

Getelementptr form.

uint64_t getZExtValue() const

A parsed version of the target data layout string in and methods for querying it.

LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const

Returns an integer type with size at least as big as that of a pointer in the given address space.

int64_t getOffset() const

const GlobalValue * getGlobal() const

bool hasLocalLinkage() const

LLVM_ABI StringRef getSection() const

LLVM_ABI const DataLayout & getDataLayout() const

Get the data layout of the module this global belongs to.

Type * getValueType() const

This is an important class for using LLVM in a threaded context.

static auto integer_valuetypes()

TypeSize getSizeInBits() const

Returns the size of the specified MVT in bits.

LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)

Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...

const BasicBlock * getBasicBlock() const

Return the LLVM basic block that this instance corresponded to originally.

LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())

Add Succ as a successor of this MachineBasicBlock.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

void splice(iterator Where, MachineBasicBlock *Other, iterator From)

Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...

MachineInstrBundleIterator< MachineInstr > iterator

LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)

Create a new object at a fixed location on the stack.

LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)

Create a new statically sized stack object, returning a nonnegative identifier to represent it.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

BasicBlockListType::iterator iterator

Ty * getInfo()

getInfo - Keep track of various per-function pieces of information for backends that would like to do...

const MachineJumpTableInfo * getJumpTableInfo() const

getJumpTableInfo - Return the jump table info object for the current function.

const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const

Add a new virtual register operand.

const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const

Representation of each machine instruction.

@ EK_Inline

EK_Inline - Jump table entries are emitted inline at their point of use.

const std::vector< MachineJumpTableEntry > & getJumpTables() const

LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")

createVirtualRegister - Create and return a new virtual register in the function with the specified r...

void addLiveIn(MCRegister Reg, Register vreg=Register())

addLiveIn - Add the specified register as a live-in.

Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...

Represents one node in the SelectionDAG.

unsigned getNumValues() const

Return the number of values defined/returned by this operator.

const SDValue & getOperand(unsigned Num) const

Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.

SDNode * getNode() const

get the SDNode which holds the desired result

bool hasOneUse() const

Return true if there is exactly one node using value ResNo of Node.

LLVM_ABI bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const

Return true if this operand (which must be a chain) reaches the specified operand without crossing an...

SDValue getValue(unsigned R) const

EVT getValueType() const

Return the ValueType of the referenced return value.

TypeSize getValueSizeInBits() const

Returns the size of the value in bits.

const SDValue & getOperand(unsigned i) const

unsigned getOpcode() const

This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...

LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)

SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)

LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)

Create a MERGE_VALUES node from the given operands.

LLVM_ABI SDVTList getVTList(EVT VT)

Return an SDVTList that represents the list of values specified.

LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)

LLVM_ABI SDValue getRegister(Register Reg, EVT VT)

LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)

Loads are not normal binary operators: their result type is not determined by their operands,...

LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)

const TargetLowering & getTargetLoweringInfo() const

SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)

SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)

Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).

SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)

const DataLayout & getDataLayout() const

LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

Create a ConstantSDNode wrapping a constant value.

LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)

LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

Helper function to build ISD::STORE nodes.

LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)

Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...

LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)

LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)

LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)

Gets or creates the specified node.

LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const

Return the number of times the sign bit of the register is replicated into the other bits.

SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)

LLVM_ABI SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)

LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const

Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...

MachineFunction & getMachineFunction() const

LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)

LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const

Determine which bits of Op are known to be either zero or one and return them in Known.

LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const

Return true if 'Op & Mask' is known to be zero.

LLVMContext * getContext() const

LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)

SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)

SDValue getEntryNode() const

Return the token chain corresponding to the entry of the function.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

StringRef - Represent a constant reference to a string, i.e.

bool starts_with(StringRef Prefix) const

Check if this string starts with the given Prefix.

constexpr size_t size() const

size - Get the string size.

TargetInstrInfo - Interface to description of machine instruction set.

void setBooleanVectorContents(BooleanContent Ty)

Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...

void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)

Indicate that the specified operation does not work with the specified type and indicate what to do a...

unsigned MaxStoresPerMemcpyOptSize

Likewise for functions with the OptSize attribute.

const TargetMachine & getTargetMachine() const

void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)

Set the maximum atomic operation size supported by the backend.

void setMinFunctionAlignment(Align Alignment)

Set the target's minimum function alignment.

unsigned MaxStoresPerMemsetOptSize

Likewise for functions with the OptSize attribute.

void setBooleanContents(BooleanContent Ty)

Specify how the target extends the result of integer and floating point boolean values from i1 to a w...

unsigned MaxStoresPerMemmove

Specify maximum number of store instructions per memmove call.

void computeRegisterProperties(const TargetRegisterInfo *TRI)

Once all of the register classes are added, this allows us to compute derived properties we expose.

unsigned MaxStoresPerMemmoveOptSize

Likewise for functions with the OptSize attribute.

void addRegisterClass(MVT VT, const TargetRegisterClass *RC)

Add the specified register class as an available regclass for the specified value type.

virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const

Return the pointer type for the given address space, defaults to the pointer type from the data layou...

void setPrefFunctionAlignment(Align Alignment)

Set the target's preferred function alignment.

unsigned MaxStoresPerMemset

Specify maximum number of store instructions per memset call.

@ ZeroOrOneBooleanContent

void setStackPointerRegisterToSaveRestore(Register R)

If set to a physical register, this specifies the register that llvm.savestack/llvm....

void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)

Targets should invoke this method for each target independent node that they want to provide a custom...

void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified load with extension does not work with the specified type and indicate wh...

std::vector< ArgListEntry > ArgListTy

bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const

This function returns true if the memory access is aligned or if the target allows this specific unal...

unsigned MaxStoresPerMemcpy

Specify maximum number of store instructions per memcpy call.

void setSchedulingPreference(Sched::Preference Pref)

Specify the target scheduling preference.

This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...

bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const

Check to see if the specified operand of the specified instruction is a constant integer.

std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const

This function lowers an abstract call to a function into an actual call.

virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const

Given a physical register constraint (e.g.

bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const

Look at Op.

TargetLowering(const TargetLowering &)=delete

bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const

Check whether a given call node is in tail position within its function.

virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const

Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.

Primary interface to the complete machine description for the target machine.

CodeModel::Model getCodeModel() const

Returns the code model.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

virtual Register getFrameRegister(const MachineFunction &MF) const =0

Debug information queries.

The instances of the Type class are immutable: once they are created, they are never changed.

static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)

@ VoidTyID

type with no size

static LLVM_ABI Type * getVoidTy(LLVMContext &C)

static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)

bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const

Return true if it makes sense to take the size of this type.

bool isFunctionTy() const

True if this is an instance of FunctionType.

LLVM Value Representation.

LLVM_ABI Align getPointerAlignment(const DataLayout &DL) const

Returns an alignment of the pointer value.

static int stackSlotSize()

Stack slot size (4 bytes)

void setVarArgsFrameIndex(int off)

void setReturnStackOffset(unsigned value)

int getVarArgsFrameIndex() const

int createLRSpillSlot(MachineFunction &MF)

unsigned getReturnStackOffset() const

bool isZExtFree(SDValue Val, EVT VT2) const override

Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...

Definition XCoreISelLowering.cpp:146

MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override

This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...

Definition XCoreISelLowering.cpp:1387

bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override

isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...

Definition XCoreISelLowering.cpp:1746

SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override

LowerOperation - Provide custom lowering hooks for some operations.

Definition XCoreISelLowering.cpp:165

void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override

ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...

Definition XCoreISelLowering.cpp:197

unsigned getJumpTableEncoding() const override

Return the entry encoding for a jump table in the current function.

Definition XCoreISelLowering.cpp:304

XCoreTargetLowering(const TargetMachine &TM, const XCoreSubtarget &Subtarget)

Definition XCoreISelLowering.cpp:43

self_iterator getIterator()

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.


unsigned ID

LLVM IR allows to use arbitrary numbers as calling convention identifiers.

@ Fast

Attempts to make calls as fast as possible (e.g.

@ C

The default llvm calling convention, compatible with C.

@ SMUL_LOHI

SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...

@ FRAME_TO_ARGS_OFFSET

FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to first (possible) on-stack ar...

@ ADD

Simple integer binary arithmetic operators.

@ ANY_EXTEND

ANY_EXTEND - Used for integer types. The high bits are undefined.

@ INTRINSIC_VOID

OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...

@ BUILD_PAIR

BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.

@ EH_RETURN

OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...

@ SIGN_EXTEND

Conversion operators.

@ EXTRACT_ELEMENT

EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...

@ MULHU

MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...

@ SHL

Shift and rotation operations.

@ ZERO_EXTEND

ZERO_EXTEND - Used for integer types, zeroing the new bits.

@ SELECT_CC

Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...

@ FRAMEADDR

FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.

@ AND

Bitwise operators - logical and, logical or, logical xor.

@ INTRINSIC_WO_CHAIN

RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...

@ TokenFactor

TokenFactor - This node takes multiple tokens as input and produces a single token result.

@ SHL_PARTS

SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.

@ INTRINSIC_W_CHAIN

RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...


MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)

Builder interface. Specify how to create the initial instruction itself.

static const unsigned CodeModelLargeSize

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

LLVM_ABI raw_fd_ostream & errs()

This returns a reference to a raw_ostream for standard error.


uint16_t MCPhysReg

An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...

uint64_t alignTo(uint64_t Size, Align A)

Returns a multiple of A needed to store Size bytes.


constexpr unsigned BitWidth

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

This struct is a compact representation of a valid (non-zero power of two) alignment.

bool isSimple() const

Test if the given EVT is simple (as opposed to being extended).

TypeSize getSizeInBits() const

Return the size of the specified value type in bits.

MVT getSimpleVT() const

Return the SimpleValueType held in the specified simple EVT.

bool isInteger() const

Return true if this is an integer or a vector integer type.

unsigned countMinTrailingZeros() const

Returns the minimum number of trailing zero bits.

unsigned getBitWidth() const

Get the bit width of this value.

void resetAll()

Resets the known state of all bits.

static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.

This represents a list of ValueType's that has been intern'd by a SelectionDAG.

This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...

This structure contains all information that is necessary for lowering calls.

SmallVector< ISD::InputArg, 32 > Ins

SmallVector< ISD::OutputArg, 32 > Outs

SmallVector< SDValue, 32 > OutVals

A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...