LLVM: lib/Target/Hexagon/HexagonISelLowering.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

46#include "llvm/IR/IntrinsicsHexagon.h"

58#include

59#include

60#include

61#include

62#include

63

64using namespace llvm;

65

66#define DEBUG_TYPE "hexagon-lowering"

67

70 cl::desc("Control jump table emission on Hexagon target"));

71

74 cl::desc("Enable Hexagon SDNode scheduling"));

75

78 cl::desc("Set minimum jump tables"));

79

82 cl::desc("Max #stores to inline memcpy"));

83

86 cl::desc("Max #stores to inline memcpy"));

87

90 cl::desc("Max #stores to inline memmove"));

91

95 cl::desc("Max #stores to inline memmove"));

96

99 cl::desc("Max #stores to inline memset"));

100

103 cl::desc("Max #stores to inline memset"));

104

107 cl::desc("Convert constant loads to immediate values."));

108

111 cl::desc("Rewrite unaligned loads as a pair of aligned loads"));

112

116 cl::desc("Disable minimum alignment of 1 for "

117 "arguments passed by value on stack"));

118

119

120

124 static const MCPhysReg ArgRegs[] = {

125 Hexagon::R0, Hexagon::R1, Hexagon::R2,

126 Hexagon::R3, Hexagon::R4, Hexagon::R5

127 };

128 const unsigned NumArgRegs = std::size(ArgRegs);

129 unsigned RegNum = State.getFirstUnallocated(ArgRegs);

130

131

132 if (RegNum != NumArgRegs && RegNum % 2 == 1)

133 State.AllocateReg(ArgRegs[RegNum]);

134

135

136

137

138 return false;

139}

140

141#include "HexagonGenCallingConv.inc"

142

145 unsigned &NumIntermediates, MVT &RegisterVT) const {

146

150

151

152

153 if (isBoolVector && !Subtarget.useHVXOps() && isPowerOf2 && NumElts >= 8) {

154 RegisterVT = MVT::v8i8;

155 IntermediateVT = MVT::v8i1;

156 NumIntermediates = NumElts / 8;

157 return NumIntermediates;

158 }

159

160

161

162 if (isBoolVector && Subtarget.useHVX64BOps() && isPowerOf2 && NumElts >= 64) {

163 RegisterVT = MVT::v64i8;

164 IntermediateVT = MVT::v64i1;

165 NumIntermediates = NumElts / 64;

166 return NumIntermediates;

167 }

168

169

170

171 if (isBoolVector && Subtarget.useHVX128BOps() && isPowerOf2 &&

172 NumElts >= 128) {

173 RegisterVT = MVT::v128i8;

174 IntermediateVT = MVT::v128i1;

175 NumIntermediates = NumElts / 128;

176 return NumIntermediates;

177 }

178

180 Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);

181}

182

183std::pair<MVT, unsigned>

187

189

192

193 if (!Subtarget.useHVXOps() && NumElems >= 8)

194 return {MVT::v8i8, NumElems / 8};

195

196 if (Subtarget.useHVX64BOps() && NumElems >= 64)

197 return {MVT::v64i8, NumElems / 64};

198

199 if (Subtarget.useHVX128BOps() && NumElems >= 128)

200 return {MVT::v128i8, NumElems / 128};

201

203}

204

207 EVT VT) const {

208

210 auto [RegisterVT, NumRegisters] =

213 return RegisterVT;

214 }

215

217}

218

221 const {

222 unsigned IntNo = Op.getConstantOperandVal(0);

224 switch (IntNo) {

225 default:

226 return SDValue();

227 case Intrinsic::thread_pointer: {

230 }

231 }

232}

233

234

235

236

237

238

242 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);

244 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),

245 false, false,

247}

248

249bool

255 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);

256

258 return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);

259 return CCInfo.CheckReturn(Outs, RetCC_Hexagon);

260}

261

262

263

264

267 bool IsVarArg,

271

273

274

277

278

279 if (Subtarget.useHVXOps())

281 else

283

286

287

288 for (unsigned i = 0; i != RVLocs.size(); ++i) {

290 SDValue Val = OutVals[i];

291

293 default:

294

297 break;

300 break;

303 break;

306 break;

309 break;

310 }

311

313

314

317 }

318

319 RetOps[0] = Chain;

320

321

324

326}

327

332

335

337 .Case("r0", Hexagon::R0)

338 .Case("r1", Hexagon::R1)

339 .Case("r2", Hexagon::R2)

340 .Case("r3", Hexagon::R3)

341 .Case("r4", Hexagon::R4)

342 .Case("r5", Hexagon::R5)

343 .Case("r6", Hexagon::R6)

344 .Case("r7", Hexagon::R7)

345 .Case("r8", Hexagon::R8)

346 .Case("r9", Hexagon::R9)

347 .Case("r10", Hexagon::R10)

348 .Case("r11", Hexagon::R11)

349 .Case("r12", Hexagon::R12)

350 .Case("r13", Hexagon::R13)

351 .Case("r14", Hexagon::R14)

352 .Case("r15", Hexagon::R15)

353 .Case("r16", Hexagon::R16)

354 .Case("r17", Hexagon::R17)

355 .Case("r18", Hexagon::R18)

356 .Case("r19", Hexagon::R19)

357 .Case("r20", Hexagon::R20)

358 .Case("r21", Hexagon::R21)

359 .Case("r22", Hexagon::R22)

360 .Case("r23", Hexagon::R23)

361 .Case("r24", Hexagon::R24)

362 .Case("r25", Hexagon::R25)

363 .Case("r26", Hexagon::R26)

364 .Case("r27", Hexagon::R27)

365 .Case("r28", Hexagon::R28)

366 .Case("r29", Hexagon::R29)

367 .Case("r30", Hexagon::R30)

368 .Case("r31", Hexagon::R31)

369 .Case("r1:0", Hexagon::D0)

370 .Case("r3:2", Hexagon::D1)

371 .Case("r5:4", Hexagon::D2)

372 .Case("r7:6", Hexagon::D3)

373 .Case("r9:8", Hexagon::D4)

374 .Case("r11:10", Hexagon::D5)

375 .Case("r13:12", Hexagon::D6)

376 .Case("r15:14", Hexagon::D7)

377 .Case("r17:16", Hexagon::D8)

378 .Case("r19:18", Hexagon::D9)

379 .Case("r21:20", Hexagon::D10)

380 .Case("r23:22", Hexagon::D11)

381 .Case("r25:24", Hexagon::D12)

382 .Case("r27:26", Hexagon::D13)

383 .Case("r29:28", Hexagon::D14)

384 .Case("r31:30", Hexagon::D15)

385 .Case("sp", Hexagon::R29)

386 .Case("fp", Hexagon::R30)

387 .Case("lr", Hexagon::R31)

388 .Case("p0", Hexagon::P0)

389 .Case("p1", Hexagon::P1)

390 .Case("p2", Hexagon::P2)

391 .Case("p3", Hexagon::P3)

392 .Case("sa0", Hexagon::SA0)

393 .Case("lc0", Hexagon::LC0)

394 .Case("sa1", Hexagon::SA1)

395 .Case("lc1", Hexagon::LC1)

396 .Case("m0", Hexagon::M0)

397 .Case("m1", Hexagon::M1)

398 .Case("usr", Hexagon::USR)

399 .Case("ugp", Hexagon::UGP)

400 .Case("cs0", Hexagon::CS0)

401 .Case("cs1", Hexagon::CS1)

403 return Reg;

404}

405

406

407

408

409

410

416

418

421

422 if (Subtarget.useHVXOps())

424 else

426

427

428 for (unsigned i = 0; i != RVLocs.size(); ++i) {

430 if (RVLocs[i].getValVT() == MVT::i1) {

431

432

433

434

435

438 MVT::i32, Glue);

439

440 Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);

443

444

445

446

450 } else {

451 RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),

452 RVLocs[i].getValVT(), Glue);

455 }

457 }

458

459 return Chain;

460}

461

462

463

475 bool IsVarArg = CLI.IsVarArg;

477

478 bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();

482

485

486

487 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

488

489

491 CCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext());

492

493 if (Subtarget.useHVXOps())

497 else

499

503 IsVarArg, IsStructRet, StructAttrFlag, Outs,

504 OutVals, Ins, DAG);

506 if (VA.isMemLoc()) {

508 break;

509 }

510 }

512 : "Argument must be passed on stack. "

513 "Not eligible for Tail Call\n"));

514 }

515

519

523

524 bool NeedsArgAlign = false;

525 Align LargestAlignSeen;

526

527 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

529 SDValue Arg = OutVals[i];

531

532 bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());

533 NeedsArgAlign |= ArgAlign;

534

535

537 default:

538

541 break;

544 break;

547 break;

550 break;

553 break;

554 }

555

559 StackPtr.getValueType());

560 MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);

561 if (ArgAlign)

562 LargestAlignSeen = std::max(

564 if (Flags.isByVal()) {

565

566

568 Flags, DAG, dl));

569 } else {

572 SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);

574 }

575 continue;

576 }

577

578

579

582 }

583

584 if (NeedsArgAlign && Subtarget.hasV60Ops()) {

585 LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");

586 Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);

587 LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);

589 }

590

591

592 if (!MemOpChains.empty())

594

599 }

600

601

602

603

604

606 for (const auto &R : RegsToPass) {

607 Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);

609 }

610 } else {

611

612

613

614

615

616

617

618

619

620

622 for (const auto &R : RegsToPass) {

623 Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);

625 }

627 }

628

631

632

633

634

640 }

641

642

644 Ops.push_back(Chain);

645 Ops.push_back(Callee);

646

647

648

649 for (const auto &R : RegsToPass)

650 Ops.push_back(DAG.getRegister(R.first, R.second.getValueType()));

651

653 assert(Mask && "Missing call preserved mask for calling convention");

655

657 Ops.push_back(Glue);

658

662 }

663

664

665

666

668

670 Chain = DAG.getNode(OpCode, dl, {MVT::Other, MVT::Glue}, Ops);

672

673

674 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, dl);

676

677

678

679 return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,

680 InVals, OutVals, Callee);

681}

682

683

684

685

690 if (!LSN)

691 return false;

694 return false;

695 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||

696 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||

697 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||

698 VT == MVT::v4i16 || VT == MVT::v8i8 ||

699 Subtarget.isHVXVectorType(VT.getSimpleVT());

700 if (!IsLegalType)

701 return false;

702

704 return false;

705 Base = Op->getOperand(0);

708 return false;

710

712 return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);

713}

714

718 else

719 return Op;

720}

721

727 unsigned LR = HRI.getRARegister();

728

729 if ((Op.getOpcode() != ISD::INLINEASM &&

730 Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())

731 return Op;

732

733 unsigned NumOps = Op.getNumOperands();

734 if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)

735 --NumOps;

736

739 unsigned NumVals = Flags.getNumOperandRegisters();

740 ++i;

741

742 switch (Flags.getKind()) {

743 default:

748 i += NumVals;

749 break;

753 for (; NumVals; --NumVals, ++i) {

755 if (Reg != LR)

756 continue;

757 HMFI.setHasClobberLR(true);

758 return Op;

759 }

760 break;

761 }

762 }

763 }

764

765 return Op;

766}

767

768

769

770

773 SDValue Chain = Op.getOperand(0);

775

776

780}

781

782

783

784

785

793

794

795

796

797

805

808 SDValue Chain = Op.getOperand(0);

809 unsigned IntNo = Op.getConstantOperandVal(1);

810

811 if (IntNo == Intrinsic::hexagon_prefetch) {

816 }

818}

819

823 SDValue Chain = Op.getOperand(0);

827

829 assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");

830

832 auto &HFI = *Subtarget.getFrameLowering();

833

834 if (A == 0)

835 A = HFI.getStackAlign().value();

836

838 dbgs () << __func__ << " Align: " << A << " Size: ";

839 Size.getNode()->dump(&DAG);

840 dbgs() << "\n";

841 });

842

846

848 return AA;

849}

850

858

859

860 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

861

862

864 CCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext());

865

866 if (Subtarget.useHVXOps())

870 else

872

873

874

875

876

877

878

880 switch (RC.getID()) {

881 case Hexagon::IntRegsRegClassID:

882 return Reg - Hexagon::R0 + 1;

883 case Hexagon::DoubleRegsRegClassID:

884 return (Reg - Hexagon::D0 + 1) * 2;

885 case Hexagon::HvxVRRegClassID:

886 return Reg - Hexagon::V0 + 1;

887 case Hexagon::HvxWRRegClassID:

888 return (Reg - Hexagon::W0 + 1) * 2;

889 }

891 };

892

893 auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());

895 HFL.FirstVarArgSavedReg = 0;

897

898 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

901 bool ByVal = Flags.isByVal();

902

903

904

905

906

907 if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)

909

910 bool InReg = VA.isRegLoc() &&

911 (!ByVal || (ByVal && Flags.getByValSize() > 8));

912

913 if (InReg) {

917

919 Register VReg = MRI.createVirtualRegister(RC);

921

922

923

924

925 if (VA.getValVT() == MVT::i1) {

931 } else {

932#ifndef NDEBUG

935 Subtarget.isHVXVectorType(RegVT));

936#endif

937 }

940 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());

941 } else {

942 assert(VA.isMemLoc() && "Argument should be passed in memory");

943

944

945

946 unsigned ObjSize = Flags.isByVal()

947 ? Flags.getByValSize()

949

950

954

955 if (Flags.isByVal()) {

956

957

958

960 } else {

964 }

965 }

966 }

967

968 if (IsVarArg && Subtarget.isEnvironmentMusl()) {

969 for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)

970 MRI.addLiveIn(Hexagon::R0+i);

971 }

972

973 if (IsVarArg && Subtarget.isEnvironmentMusl()) {

974 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);

976

977

978 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;

979 bool RequiresPadding = (NumVarArgRegs & 1);

980 int RegSaveAreaSizePlusPadding = RequiresPadding

981 ? (NumVarArgRegs + 1) * 4

982 : NumVarArgRegs * 4;

983

984 if (RegSaveAreaSizePlusPadding > 0) {

985

987 if (!(RegAreaStart % 8))

988 RegAreaStart = (RegAreaStart + 7) & -8;

989

990 int RegSaveAreaFrameIndex =

991 MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);

992 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);

993

994

995 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;

997 HMFI.setVarArgsFrameIndex(FI);

998 } else {

999

1000

1003 HMFI.setRegSavedAreaStartFrameIndex(FI);

1004 HMFI.setVarArgsFrameIndex(FI);

1005 }

1006 }

1007

1008

1009 if (IsVarArg && !Subtarget.isEnvironmentMusl()) {

1010

1013 HMFI.setVarArgsFrameIndex(FI);

1014 }

1015

1016 return Chain;

1017}

1018

1021

1022

1027

1028 if (!Subtarget.isEnvironmentMusl()) {

1029 return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),

1031 }

1033 auto &HFL = *Subtarget.getFrameLowering();

1036

1037

1039

1040

1041

1042

1043

1044

1045

1046 SDValue SavedRegAreaStartFrameIndex =

1047 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);

1048

1050

1051 if (HFL.FirstVarArgSavedReg & 1)

1052 SavedRegAreaStartFrameIndex =

1054 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),

1055 MVT::i32),

1057

1058

1061 SavedRegAreaStartFrameIndex,

1064

1065

1069 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),

1070 PtrVT),

1073

1074

1078 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),

1079 PtrVT),

1082

1084}

1085

1088

1089 assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");

1090 SDValue Chain = Op.getOperand(0);

1091 SDValue DestPtr = Op.getOperand(1);

1092 SDValue SrcPtr = Op.getOperand(2);

1096

1097

1100 false, false, nullptr, std::nullopt,

1102}

1103

1109 MVT ResTy = ty(Op);

1110 MVT OpTy = ty(LHS);

1111

1112 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {

1114 assert(ElemTy.isScalarInteger());

1117 return DAG.getSetCC(dl, ResTy,

1120 }

1121

1122

1124 return Op;

1125

1126

1127

1128

1129 auto isSExtFree = [this](SDValue N) {

1130 switch (N.getOpcode()) {

1132

1135 return false;

1139

1140

1141

1142 return ThisBW >= OrigBW;

1143 }

1144 case ISD::LOAD:

1145

1146 return true;

1147 }

1148 return false;

1149 };

1150

1151 if (OpTy == MVT::i8 || OpTy == MVT::i16) {

1153 bool IsNegative = C && C->getAPIntValue().isNegative();

1154 if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))

1155 return DAG.getSetCC(dl, ResTy,

1158 }

1159

1161}

1162

1165 SDValue PredOp = Op.getOperand(0);

1166 SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);

1167 MVT OpTy = ty(Op1);

1169

1170 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {

1172 assert(ElemTy.isScalarInteger());

1175

1177 DAG.getSelect(dl, WideTy, PredOp,

1180 dl, OpTy);

1181 }

1182

1184}

1185

1188 EVT ValTy = Op.getValueType();

1191 bool isVTi1Type = false;

1193 if (cast(CV->getType())->getElementType()->isIntegerTy(1)) {

1196 unsigned VecLen = CV->getNumOperands();

1198 "conversion only supported for pow2 VectorSize");

1199 for (unsigned i = 0; i < VecLen; ++i)

1200 NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue()));

1201

1203 isVTi1Type = true;

1204 }

1205 }

1209

1210 unsigned Offset = 0;

1215 else if (isVTi1Type)

1217 else

1219 TF);

1220

1222 "Inconsistent target flag encountered");

1223

1224 if (IsPositionIndependent)

1227}

1228

1231 EVT VT = Op.getValueType();

1236 }

1237

1240}

1241

1248

1249 EVT VT = Op.getValueType();

1251 unsigned Depth = Op.getConstantOperandVal(0);

1258 }

1259

1260

1263}

1264

1270

1271 EVT VT = Op.getValueType();

1273 unsigned Depth = Op.getConstantOperandVal(0);

1279 return FrameAddr;

1280}

1281

1287

1293 auto *GV = GAN->getGlobal();

1294 int64_t Offset = GAN->getOffset();

1295

1296 auto &HLOF = *HTM.getObjFileLowering();

1298

1302 if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))

1305 }

1306

1308 if (UsePCRel) {

1312 }

1313

1314

1319}

1320

1321

1337

1346

1350 unsigned char OperandFlags) const {

1358 OperandFlags);

1359

1360

1361

1362

1363

1364

1365

1366

1367 const auto &HRI = *Subtarget.getRegisterInfo();

1369 assert(Mask && "Missing call preserved mask for calling convention");

1373

1374

1376

1378 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);

1379}

1380

1381

1382

1383

1390

1391

1393

1395 unsigned char TF =

1397

1398

1401

1403

1404 if (IsPositionIndependent) {

1405

1407

1408

1409

1411 }

1412

1413

1414

1417

1418

1419

1420 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);

1421}

1422

1423

1424

1425

1432

1433

1435

1439

1440

1441

1443}

1444

1445

1446

1447

1454

1455

1458

1459

1461

1462

1465

1466

1470

1474

1476 Hexagon::R0, Flags);

1477}

1478

1479

1480

1481

1482

1483

1488

1489 switch (HTM.getTLSModel(GA->getGlobal())) {

1497 }

1499}

1500

1501

1502

1503

1504

1509 auto &HRI = *Subtarget.getRegisterInfo();

1510

1517

1520

1523 else

1525

1526

1533

1534

1535

1536

1537

1539 addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass);

1540 addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass);

1541 addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass);

1549

1552

1553

1554

1555

1556

1557

1558

1559

1560

1561

1562

1563

1564

1565

1585

1586

1590

1591

1596

1597

1601 if (Subtarget.isEnvironmentMusl())

1603 else

1605

1609

1612 else

1615

1616 for (unsigned LegalIntOp :

1620 }

1621

1622

1623

1631 }

1634

1639

1640

1645

1650

1655

1656 for (unsigned IntExpOp :

1663 }

1664

1665 for (unsigned FPExpOp :

1666 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS,

1670 }

1671

1672

1677 }

1678

1682

1685

1686

1690 }

1694 }

1696

1697

1698

1699

1700

1701

1702

1703

1704 static const unsigned VectExpOps[] = {

1705

1709

1712

1714 ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN,

1715 ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2,

1716 ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC,

1717 ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR,

1718 ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM,

1719 ISD::FSINCOS, ISD::FLDEXP,

1720

1722

1728 };

1729

1730

1732 for (unsigned VectExpOp : VectExpOps)

1734

1735

1737 if (TargetVT == VT)

1738 continue;

1743 }

1744

1745

1746 if (VT.getVectorElementType() != MVT::i32) {

1750 }

1754 }

1755

1758

1759

1760

1767

1771

1772

1773 for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,

1774 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {

1781

1788

1789 if (NativeVT.getVectorElementType() != MVT::i1) {

1793 }

1794 }

1795

1796 for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {

1801 }

1802

1803

1804

1805

1806

1807 for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,

1808 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {

1811 }

1812

1813

1814 for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {

1817 }

1818

1819

1820 for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16,

1821 MVT::v2i32}) {

1829 }

1830

1831

1832 for (MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) {

1837 }

1838

1839

1847

1848

1854

1857

1870

1871

1872

1880

1885

1888

1889

1890

1891 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,

1892 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {

1895 }

1896

1897

1898

1899 if (Subtarget.hasV60Ops()) {

1904 }

1905 if (Subtarget.hasV66Ops()) {

1908 }

1909 if (Subtarget.hasV67Ops()) {

1913 }

1914

1918

1919 if (Subtarget.useHVXOps())

1920 initializeHVXLowering();

1921

1923}

1924

1964 return "HexagonISD::THREAD_POINTER";

1981 }

1982 return nullptr;

1983}

1984

1985bool

1986HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign,

1989 if (!CA)

1990 return true;

1991 unsigned Addr = CA->getZExtValue();

1992 Align HaveAlign =

1994 if (HaveAlign >= NeedAlign)

1995 return true;

1996

1998

1999 struct DiagnosticInfoMisalignedTrap : public DiagnosticInfo {

2000 DiagnosticInfoMisalignedTrap(StringRef M)

2003 DP << Msg;

2004 }

2005 static bool classof(const DiagnosticInfo *DI) {

2006 return DI->getKind() == DK_MisalignedTrap;

2007 }

2008 StringRef Msg;

2009 };

2010

2011 std::string ErrMsg;

2012 raw_string_ostream O(ErrMsg);

2013 O << "Misaligned constant address: " << format_hex(Addr, 10)

2014 << " has alignment " << HaveAlign.value()

2015 << ", but the memory access requires " << NeedAlign.value();

2018 O << ". The instruction has been replaced with a trap.";

2019

2021 return false;

2022}

2023

2026 const {

2027 const SDLoc &dl(Op);

2029 assert(LS->isIndexed() && "Not expecting indexed ops on constant address");

2030

2033 if (LS->getOpcode() == ISD::LOAD)

2035 return Trap;

2036}

2037

2038

2039

2042 return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||

2043 ID == Intrinsic::hexagon_L2_loadri_pbr ||

2044 ID == Intrinsic::hexagon_L2_loadrh_pbr ||

2045 ID == Intrinsic::hexagon_L2_loadruh_pbr ||

2046 ID == Intrinsic::hexagon_L2_loadrb_pbr ||

2047 ID == Intrinsic::hexagon_L2_loadrub_pbr);

2048}

2049

2050

2051

2052

2061

2062

2063

2064

2067 int Idx = -1;

2070

2071 if (Blk == Parent) {

2074

2075 do {

2076 BaseVal = BackEdgeVal;

2078 } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));

2079

2080

2081 if (IntrBaseVal == BackEdgeVal)

2082 continue;

2083 Idx = i;

2084 break;

2085 } else

2086 Idx = i;

2087 }

2088 assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");

2090}

2091

2092

2093

2094

2096 Value *IntrBaseVal = V;

2098

2099

2100 do {

2101 BaseVal = V;

2103 } while (BaseVal != V);

2104

2105

2108

2109 else

2110 return V;

2111}

2112

2113

2114

2115

2116

2122 case Intrinsic::hexagon_L2_loadrd_pbr:

2123 case Intrinsic::hexagon_L2_loadri_pbr:

2124 case Intrinsic::hexagon_L2_loadrh_pbr:

2125 case Intrinsic::hexagon_L2_loadruh_pbr:

2126 case Intrinsic::hexagon_L2_loadrb_pbr:

2127 case Intrinsic::hexagon_L2_loadrub_pbr: {

2129 auto &DL = I.getDataLayout();

2130 auto &Cont = I.getCalledFunction()->getParent()->getContext();

2131

2132

2133

2134 Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0);

2138

2139

2140 Info.offset = 0;

2141 Info.align = DL.getABITypeAlign(Info.memVT.getTypeForEVT(Cont));

2143 return true;

2144 }

2145 case Intrinsic::hexagon_V6_vgathermw:

2146 case Intrinsic::hexagon_V6_vgathermw_128B:

2147 case Intrinsic::hexagon_V6_vgathermh:

2148 case Intrinsic::hexagon_V6_vgathermh_128B:

2149 case Intrinsic::hexagon_V6_vgathermhw:

2150 case Intrinsic::hexagon_V6_vgathermhw_128B:

2151 case Intrinsic::hexagon_V6_vgathermwq:

2152 case Intrinsic::hexagon_V6_vgathermwq_128B:

2153 case Intrinsic::hexagon_V6_vgathermhq:

2154 case Intrinsic::hexagon_V6_vgathermhq_128B:

2155 case Intrinsic::hexagon_V6_vgathermhwq:

2156 case Intrinsic::hexagon_V6_vgathermhwq_128B:

2157 case Intrinsic::hexagon_V6_vgather_vscattermh:

2158 case Intrinsic::hexagon_V6_vgather_vscattermh_128B: {

2159 const Module &M = *I.getParent()->getParent()->getParent();

2161 Type *VecTy = I.getArgOperand(1)->getType();

2163 Info.ptrVal = I.getArgOperand(0);

2164 Info.offset = 0;

2165 Info.align =

2166 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);

2170 return true;

2171 }

2172 default:

2173 break;

2174 }

2175 return false;

2176}

2177

2179 return X.getValueType().isScalarInteger();

2180}

2181

2185

2191

2196

2197

2199 unsigned DefinedValues) const {

2200 return false;

2201}

2202

2204 unsigned Index) const {

2207 return false;

2208

2211 return true;

2212

2213

2215}

2216

2221

2223 EVT VT) const {

2224 return true;

2225}

2226

2231

2234

2235 if (Subtarget.useHVXOps()) {

2236 unsigned Action = getPreferredHvxVectorAction(VT);

2237 if (Action != ~0u)

2239 }

2240

2241

2242 if (ElemTy == MVT::i1)

2244

2245

2246

2249

2251}

2252

2255 if (Subtarget.useHVXOps()) {

2256 unsigned Action = getCustomHvxOperationAction(Op);

2257 if (Action != ~0u)

2259 }

2261}

2262

2263std::pair<SDValue, int>

2264HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {

2268 return { Addr.getOperand(0), CN->getSExtValue() };

2269 }

2270 return { Addr, 0 };

2271}

2272

2273

2274

2277 const {

2280 assert(AM.size() <= 8 && "Unexpected shuffle mask");

2281 unsigned VecLen = AM.size();

2282

2283 MVT VecTy = ty(Op);

2284 assert(!Subtarget.isHVXVectorType(VecTy, true) &&

2285 "HVX shuffles should be legal");

2287

2291

2292

2293

2294

2295 if (ty(Op0) != VecTy || ty(Op1) != VecTy)

2297

2298

2299

2301 unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data();

2302 if (F == AM.size())

2304 if (AM[F] >= int(VecLen)) {

2307 }

2308

2309

2312 for (int M : Mask) {

2313 if (M < 0) {

2314 for (unsigned j = 0; j != ElemBytes; ++j)

2316 } else {

2317 for (unsigned j = 0; j != ElemBytes; ++j)

2318 ByteMask.push_back(M*ElemBytes + j);

2319 }

2320 }

2322

2323

2324

2325

2326

2327

2330 for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {

2331 unsigned S = 8*i;

2332 uint64_t M = ByteMask[i] & 0xFF;

2333 if (M == 0xFF)

2334 MaskUnd |= M << S;

2335 MaskIdx |= M << S;

2336 }

2337

2338 if (ByteMask.size() == 4) {

2339

2340 if (MaskIdx == (0x03020100 | MaskUnd))

2341 return Op0;

2342

2343 if (MaskIdx == (0x00010203 | MaskUnd)) {

2347 }

2348

2349

2351 getCombine(Op1, Op0, dl, typeJoin({ty(Op1), ty(Op0)}), DAG);

2352 if (MaskIdx == (0x06040200 | MaskUnd))

2353 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);

2354 if (MaskIdx == (0x07050301 | MaskUnd))

2355 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);

2356

2358 getCombine(Op0, Op1, dl, typeJoin({ty(Op0), ty(Op1)}), DAG);

2359 if (MaskIdx == (0x02000604 | MaskUnd))

2360 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);

2361 if (MaskIdx == (0x03010705 | MaskUnd))

2362 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);

2363 }

2364

2365 if (ByteMask.size() == 8) {

2366

2367 if (MaskIdx == (0x0706050403020100ull | MaskUnd))

2368 return Op0;

2369

2370 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {

2374 }

2375

2376

2377 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))

2378 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);

2379 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))

2380 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);

2381 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))

2382 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);

2383 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))

2384 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);

2385 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {

2386 VectorPair P = opSplit(Op0, dl, DAG);

2387 return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);

2388 }

2389

2390

2391 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))

2392 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);

2393 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))

2394 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);

2395 }

2396

2398}

2399

2402 switch (Op.getOpcode()) {

2405 return S;

2406 break;

2408 return Op.getOperand(0);

2409 }

2411}

2412

2413

2416 const {

2417 unsigned NewOpc;

2418 switch (Op.getOpcode()) {

2421 break;

2424 break;

2427 break;

2428 default:

2430 }

2431

2432 if (SDValue Sp = getSplatValue(Op.getOperand(1), DAG))

2433 return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), Sp);

2435}

2436

2440

2441

2442

2443

2445 if (SDValue S = getVectorShiftByInt(Op, DAG))

2446 Res = S;

2447

2449 switch (Opc) {

2453 break;

2454 default:

2455

2457 }

2458

2459 MVT ResTy = ty(Res);

2461 return Res;

2462

2463

2466

2467 auto ShiftPartI8 = [&dl, &DAG, this](unsigned Opc, SDValue V, SDValue A) {

2468 MVT Ty = ty(V);

2474 };

2475

2477 return ShiftPartI8(Opc, Val, Amt);

2478

2479 auto [LoV, HiV] = opSplit(Val, dl, DAG);

2481 {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)});

2482}

2483

2490

2493 MVT ResTy = ty(Op);

2494 SDValue InpV = Op.getOperand(0);

2495 MVT InpTy = ty(InpV);

2498

2499

2500 if (InpTy == MVT::i8) {

2501 if (ResTy == MVT::v8i1) {

2504 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);

2505 }

2507 }

2508

2509 return Op;

2510}

2511

2512bool

2513HexagonTargetLowering::getBuildVectorConstInts(ArrayRef Values,

2519 bool AllConst = true;

2520

2521 for (unsigned i = 0, e = Values.size(); i != e; ++i) {

2523 if (V.isUndef()) {

2524 Consts[i] = ConstantInt::get(IntTy, 0);

2525 continue;

2526 }

2527

2529 const ConstantInt *CI = CN->getConstantIntValue();

2532 const ConstantFP *CF = CN->getConstantFPValue();

2534 Consts[i] = ConstantInt::get(IntTy, A.getZExtValue());

2535 } else {

2536 AllConst = false;

2537 }

2538 }

2539 return AllConst;

2540}

2541

2547

2549 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);

2550

2551 unsigned First, Num = Elem.size();

2553 if (!isUndef(Elem[First]))

2554 break;

2555 }

2556 if (First == Num)

2558

2559 if (AllConst &&

2561 return getZero(dl, VecTy, DAG);

2562

2563 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {

2565 if (AllConst) {

2566

2567

2568 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |

2569 Consts[1]->getZExtValue() << 16;

2571 }

2573 if (ElemTy == MVT::f16) {

2576 } else {

2577 E0 = Elem[0];

2578 E1 = Elem[1];

2579 }

2580 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {E1, E0}, DAG);

2582 }

2583

2584 if (ElemTy == MVT::i8) {

2585

2586 if (AllConst) {

2587 uint32_t V = (Consts[0]->getZExtValue() & 0xFF) |

2588 (Consts[1]->getZExtValue() & 0xFF) << 8 |

2589 (Consts[2]->getZExtValue() & 0xFF) << 16 |

2590 Consts[3]->getZExtValue() << 24;

2592 }

2593

2594

2595 bool IsSplat = true;

2596 for (unsigned i = First+1; i != Num; ++i) {

2597 if (Elem[i] == Elem[First] || isUndef(Elem[i]))

2598 continue;

2599 IsSplat = false;

2600 break;

2601 }

2602 if (IsSplat) {

2603

2606 }

2607

2608

2609

2610

2613 for (unsigned i = 0; i != 4; ++i) {

2616 }

2622

2623 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);

2624 return DAG.getBitcast(MVT::v4i8, R);

2625 }

2626

2627#ifndef NDEBUG

2628 dbgs() << "VecTy: " << VecTy << '\n';

2629#endif

2631}

2632

2638

2640 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);

2641

2642 unsigned First, Num = Elem.size();

2644 if (!isUndef(Elem[First]))

2645 break;

2646 }

2647 if (First == Num)

2649

2650 if (AllConst &&

2652 return getZero(dl, VecTy, DAG);

2653

2654

2655 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {

2656 bool IsSplat = true;

2657 for (unsigned i = First+1; i != Num; ++i) {

2658 if (Elem[i] == Elem[First] || isUndef(Elem[i]))

2659 continue;

2660 IsSplat = false;

2661 break;

2662 }

2663 if (IsSplat) {

2664

2669 }

2670 }

2671

2672

2673 if (AllConst) {

2674 uint64_t Val = 0;

2676 uint64_t Mask = (1ull << W) - 1;

2677 for (unsigned i = 0; i != Num; ++i)

2678 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);

2681 }

2682

2683

2685 SDValue L = (ElemTy == MVT::i32)

2686 ? Elem[0]

2687 : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);

2688 SDValue H = (ElemTy == MVT::i32)

2689 ? Elem[1]

2690 : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);

2691 return getCombine(H, L, dl, VecTy, DAG);

2692}

2693

2695HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,

2698 MVT VecTy = ty(VecV);

2702 return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG);

2703

2707 assert((VecWidth % ElemWidth) == 0);

2708 assert(VecWidth == 32 || VecWidth == 64);

2709

2710

2711 MVT ScalarTy = tyScalar(VecTy);

2712 VecV = DAG.getBitcast(ScalarTy, VecV);

2713

2716

2718 unsigned Off = IdxN->getZExtValue() * ElemWidth;

2719 if (VecWidth == 64 && ValWidth == 32) {

2720 assert(Off == 0 || Off == 32);

2721 ExtV = Off == 0 ? LoHalf(VecV, DAG) : HiHalf(VecV, DAG);

2722 } else if (Off == 0 && (ValWidth % 8) == 0) {

2724 } else {

2726

2727

2729 {VecV, WidthV, OffV});

2730 }

2731 } else {

2732 if (ty(IdxV) != MVT::i32)

2735 DAG.getConstant(ElemWidth, dl, MVT::i32));

2737 {VecV, WidthV, OffV});

2738 }

2739

2740

2741 ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy));

2743 return ExtV;

2744}

2745

2747HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV,

2750

2751

2752 MVT VecTy = ty(VecV);

2756 "Vector elements should equal vector width size");

2757 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);

2758

2759

2761

2762

2763

2765 }

2766

2767

2768 if (ValWidth == 1) {

2769 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);

2773 }

2774

2775

2776

2777

2778

2779 unsigned Scale = VecWidth / ValWidth;

2780

2781

2782

2783 assert(ty(IdxV) == MVT::i32);

2784 unsigned VecRep = 8 / VecWidth;

2786 DAG.getConstant(8*VecRep, dl, MVT::i32));

2789 while (Scale > 1) {

2790

2791

2792 T1 = LoHalf(T1, DAG);

2793 T1 = expandPredicate(T1, dl, DAG);

2794 Scale /= 2;

2795 }

2796

2798}

2799

2804 MVT VecTy = ty(VecV);

2806 return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG);

2807

2810 assert(VecWidth == 32 || VecWidth == 64);

2811 assert((VecWidth % ValWidth) == 0);

2812

2813

2815

2816

2817 unsigned VW = ty(ValV).getSizeInBits();

2819 VecV = DAG.getBitcast(ScalarTy, VecV);

2820 if (VW != VecWidth)

2822

2825

2827 unsigned W = C->getZExtValue() * ValWidth;

2830 {VecV, ValV, WidthV, OffV});

2831 } else {

2832 if (ty(IdxV) != MVT::i32)

2836 {VecV, ValV, WidthV, OffV});

2837 }

2838

2839 return DAG.getNode(ISD::BITCAST, dl, VecTy, InsV);

2840}

2841

2843HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV,

2846 MVT VecTy = ty(VecV);

2848

2849 if (ValTy == MVT::i1) {

2850 SDValue ToReg = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);

2856 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Ins}, DAG);

2857 }

2858

2863

2866

2867 for (unsigned R = Scale; R > 1; R /= 2) {

2868 ValR = contractPredicate(ValR, dl, DAG);

2869 ValR = getCombine(DAG.getUNDEF(MVT::i32), ValR, dl, MVT::i64, DAG);

2870 }

2871

2878}

2879

2881HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,

2883 assert(ty(Vec32).getSizeInBits() == 32);

2884 if (isUndef(Vec32))

2885 return DAG.getUNDEF(MVT::i64);

2889}

2890

2892HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,

2894 assert(ty(Vec64).getSizeInBits() == 64);

2895 if (isUndef(Vec64))

2896 return DAG.getUNDEF(MVT::i32);

2897

2900 {0, 2, 4, 6, 1, 3, 5, 7});

2901 return extractVector(S, DAG.getConstant(0, dl, MVT::i32), dl, MVT::v4i8,

2902 MVT::i32, DAG);

2903}

2904

2907 const {

2910 if (W <= 64)

2913 }

2914

2920}

2921

2924 const {

2925 MVT ValTy = ty(Val);

2927

2930 if (ValLen == ResLen)

2931 return Val;

2932

2933 const SDLoc &dl(Val);

2934 assert(ValLen < ResLen);

2935 assert(ResLen % ValLen == 0);

2936

2938 for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i)

2940

2942}

2943

2947 MVT ElemTy = ty(Hi);

2949

2955 }

2956

2964}

2965

2968 MVT VecTy = ty(Op);

2972 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)

2973 Ops.push_back(Op.getOperand(i));

2974

2975 if (BW == 32)

2976 return buildVector32(Ops, dl, VecTy, DAG);

2977 if (BW == 64)

2978 return buildVector64(Ops, dl, VecTy, DAG);

2979

2980 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {

2981

2982 bool All0 = true, All1 = true;

2985 if (CN == nullptr) {

2986 All0 = All1 = false;

2987 break;

2988 }

2989 uint32_t C = CN->getZExtValue();

2990 All0 &= (C == 0);

2991 All1 &= (C == 1);

2992 }

2993 if (All0)

2995 if (All1)

2997

2998

2999

3000

3002 SDValue Z = getZero(dl, MVT::i32, DAG);

3003

3005 for (unsigned i = 0; i != 8; ++i) {

3007 Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);

3008 }

3010 for (unsigned i = 0, e = A.size()/2; i != e; ++i)

3011 Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]);

3012 }

3013

3014 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);

3015 }

3016

3018}

3019

3023 MVT VecTy = ty(Op);

3026 assert(Op.getNumOperands() == 2);

3027 return getCombine(Op.getOperand(1), Op.getOperand(0), dl, VecTy, DAG);

3028 }

3029

3031 if (ElemTy == MVT::i1) {

3032 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);

3033 MVT OpTy = ty(Op.getOperand(0));

3034

3035

3037 assert(Scale == Op.getNumOperands() && Scale > 1);

3038

3039

3040

3041

3042

3044 unsigned IdxW = 0;

3045

3046 for (SDValue P : Op.getNode()->op_values()) {

3048 for (unsigned R = Scale; R > 1; R /= 2) {

3049 W = contractPredicate(W, dl, DAG);

3050 W = getCombine(DAG.getUNDEF(MVT::i32), W, dl, MVT::i64, DAG);

3051 }

3052 W = LoHalf(W, DAG);

3054 }

3055

3056 while (Scale > 2) {

3058 Words[IdxW ^ 1].clear();

3059

3060 for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {

3061 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];

3062

3064 {W0, W1, WidthV, WidthV});

3066 }

3067 IdxW ^= 1;

3068 Scale /= 2;

3069 }

3070

3071

3072 assert(Scale == 2 && Words[IdxW].size() == 2);

3073

3074 SDValue WW = getCombine(Words[IdxW][1], Words[IdxW][0], dl, MVT::i64, DAG);

3076 }

3077

3079}

3080

3086 return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);

3087}

3088

3092 return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),

3093 ty(Op), ty(Op), DAG);

3094}

3095

3099 return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),

3101}

3102

3106 SDValue ValV = Op.getOperand(1);

3107 return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),

3108 SDLoc(Op), ty(ValV), DAG);

3109}

3110

3111bool

3113

3114

3116 return false;

3117

3118

3119

3120

3122}

3123

3126 MVT Ty = ty(Op);

3131

3132 bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;

3133 if (LoadPred) {

3140 }

3141

3143 if (!validateConstPtrAlignment(LN->getBasePtr(), ClaimAlign, dl, DAG))

3144 return replaceMemWithUndef(Op, DAG);

3145

3146

3147

3149 if (LoadPred) {

3150 SDValue TP = getInstr(Hexagon::C2_tfrrp, dl, MemTy, {LU}, DAG);

3155 }

3158 }

3159 return LU;

3160}

3161

3167 MVT Ty = ty(Val);

3168

3169 if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {

3170

3171 SDValue TR = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {Val}, DAG);

3177 }

3179 }

3180

3182 if (!validateConstPtrAlignment(SN->getBasePtr(), ClaimAlign, dl, DAG))

3183 return replaceMemWithUndef(Op, DAG);

3184

3186 Align NeedAlign = Subtarget.getTypeAlignment(StoreTy);

3187 if (ClaimAlign < NeedAlign)

3190}

3191

3194 const {

3196 MVT LoadTy = ty(Op);

3197 unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy).value();

3199 if (HaveAlign >= NeedAlign)

3200 return Op;

3201

3205

3206

3207

3208 bool DoDefault = false;

3209

3211 DoDefault = true;

3212

3216 return Op;

3217 DoDefault = true;

3218 }

3219 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {

3220

3223 DoDefault =

3225 }

3226 if (DoDefault) {

3229 }

3230

3231

3232

3233

3234

3235

3237

3238 unsigned LoadLen = NeedAlign;

3241 auto BO = getBaseAndOffset(Base);

3242 unsigned BaseOpc = BO.first.getOpcode();

3244 return Op;

3245

3246 if (BO.second % LoadLen != 0) {

3247 BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first,

3248 DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));

3249 BO.second -= BO.second % LoadLen;

3250 }

3253 DAG.getConstant(NeedAlign, dl, MVT::i32))

3254 : BO.first;

3259

3264 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),

3265 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),

3266 MMO->getSuccessOrdering(), MMO->getFailureOrdering());

3267 }

3268

3269 SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);

3270 SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);

3271

3273 {Load1, Load0, BaseNoOff.getOperand(0)});

3277 return M;

3278}

3279

3282 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);

3284 if (!CY)

3286

3288 SDVTList VTs = Op.getNode()->getVTList();

3291 unsigned Opc = Op.getOpcode();

3292

3293 if (CY) {

3294 uint64_t VY = CY->getZExtValue();

3295 assert(VY != 0 && "This should have been folded");

3296

3297 if (VY != 1)

3299

3305 }

3311 }

3312 }

3313

3315}

3316

3320 unsigned Opc = Op.getOpcode();

3321 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);

3322

3325 { X, Y, C });

3326

3327 EVT CarryTy = C.getValueType();

3329 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });

3333}

3334

3337 SDValue Chain = Op.getOperand(0);

3339 SDValue Handler = Op.getOperand(2);

3342

3343

3347

3348 unsigned OffsetReg = Hexagon::R28;

3349

3355

3356

3357

3358

3360}

3361

3364 unsigned Opc = Op.getOpcode();

3365

3366 if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR)

3368

3369 if (isHvxOperation(Op.getNode(), DAG)) {

3370

3371 if (SDValue V = LowerHvxOperation(Op, DAG))

3372 return V;

3373 }

3374

3375 switch (Opc) {

3376 default:

3377#ifndef NDEBUG

3378 Op.getNode()->dumpr(&DAG);

3380 errs() << "Error: check for a non-legal type in this operation\n";

3381#endif

3383

3394 case ISD::LOAD: return LowerLoad(Op, DAG);

3395 case ISD::STORE: return LowerStore(Op, DAG);

3424 break;

3425 }

3426

3428}

3429

3430void

3434 if (isHvxOperation(N, DAG)) {

3435 LowerHvxOperationWrapper(N, Results, DAG);

3437 return;

3438 }

3439

3441 unsigned Opc = N->getOpcode();

3442

3443 switch (Opc) {

3446 Results.push_back(opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG));

3447 break;

3448 case ISD::STORE:

3449

3450

3451

3452

3453

3454 return;

3455 default:

3457 break;

3458 }

3459}

3460

3461void

3465 if (isHvxOperation(N, DAG)) {

3466 ReplaceHvxNodeResults(N, Results, DAG);

3468 return;

3469 }

3470

3472 switch (N->getOpcode()) {

3476 return;

3477 case ISD::BITCAST:

3478

3479 if (N->getValueType(0) == MVT::i8) {

3480 if (N->getOperand(0).getValueType() == MVT::v8i1) {

3481 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,

3482 N->getOperand(0), DAG);

3485 }

3486 }

3487 break;

3488 }

3489}

3490

3494 if (isHvxOperation(N, DCI.DAG)) {

3495 if (SDValue V = PerformHvxDAGCombine(N, DCI))

3496 return V;

3498 }

3499

3502 unsigned Opc = Op.getOpcode();

3503

3506

3508 EVT TruncTy = Op.getValueType();

3510

3512 return Elem0;

3513

3516 }

3517 }

3518

3521

3524 switch (P.getOpcode()) {

3528 return getZero(dl, ty(Op), DCI.DAG);

3529 default:

3530 break;

3531 }

3533

3534

3535

3538 SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);

3541 Op.getOperand(2), Op.getOperand(1));

3542 return VSel;

3543 }

3544 }

3547

3549 MVT TruncTy = ty(Op);

3551

3552 if (ty(Elem0) == TruncTy)

3553 return Elem0;

3554

3555 if (ty(Elem0).bitsGT(TruncTy))

3557 }

3559

3560

3561 auto fold0 = [&, this](SDValue Op) {

3562 if (ty(Op) != MVT::i64)

3568

3571

3574 if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) {

3575 unsigned A = Amt->getZExtValue();

3582 }

3584 };

3585

3587 return R;

3588 }

3589

3591}

3592

3593

3602

3603

3604

3605

3606

3609 if (Constraint.size() == 1) {

3610 switch (Constraint[0]) {

3611 case 'q':

3612 case 'v':

3613 if (Subtarget.useHVXOps())

3615 break;

3616 case 'a':

3618 default:

3619 break;

3620 }

3621 }

3623}

3624

3625std::pair<unsigned, const TargetRegisterClass*>

3628

3629 if (Constraint.size() == 1) {

3630 switch (Constraint[0]) {

3631 case 'r':

3633 default:

3634 return {0u, nullptr};

3635 case MVT::i1:

3636 case MVT::i8:

3637 case MVT::i16:

3638 case MVT::i32:

3639 case MVT::f32:

3640 return {0u, &Hexagon::IntRegsRegClass};

3641 case MVT::i64:

3642 case MVT::f64:

3643 return {0u, &Hexagon::DoubleRegsRegClass};

3644 }

3645 break;

3646 case 'a':

3647 if (VT != MVT::i32)

3648 return {0u, nullptr};

3649 return {0u, &Hexagon::ModRegsRegClass};

3650 case 'q':

3652 default:

3653 return {0u, nullptr};

3654 case 64:

3655 case 128:

3656 return {0u, &Hexagon::HvxQRRegClass};

3657 }

3658 break;

3659 case 'v':

3661 default:

3662 return {0u, nullptr};

3663 case 512:

3664 return {0u, &Hexagon::HvxVRRegClass};

3665 case 1024:

3666 if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())

3667 return {0u, &Hexagon::HvxVRRegClass};

3668 return {0u, &Hexagon::HvxWRRegClass};

3669 case 2048:

3670 return {0u, &Hexagon::HvxWRRegClass};

3671 }

3672 break;

3673 default:

3674 return {0u, nullptr};

3675 }

3676 }

3677

3679}

3680

3681

3682

3683

3685 bool ForCodeSize) const {

3686 return true;

3687}

3688

3689

3690

3692 Type *Ty) const {

3694 return false;

3695

3696 assert(Ty->isIntegerTy());

3697 unsigned BitSize = Ty->getPrimitiveSizeInBits();

3698 return (BitSize > 0 && BitSize <= 64);

3699}

3700

3701

3702

3706 if (Ty->isSized()) {

3707

3708

3709

3710

3711

3712

3713

3714 Align A = DL.getABITypeAlign(Ty);

3715

3717 return false;

3718

3720 return false;

3721 }

3722

3723

3725 return false;

3726

3727 int Scale = AM.Scale;

3728 if (Scale < 0)

3729 Scale = -Scale;

3730 switch (Scale) {

3731 case 0:

3732 break;

3733 default:

3734 return false;

3735 }

3736 return true;

3737}

3738

3739

3740

3742 const {

3743 return HTM.getRelocationModel() == Reloc::Static;

3744}

3745

3746

3747

3748

3749

3751 return Imm >= -512 && Imm <= 511;

3752}

3753

3754

3755

3756

3760 bool IsVarArg,

3761 bool IsCalleeStructRet,

3762 bool IsCallerStructRet,

3769 bool CCMatch = CallerCC == CalleeCC;

3770

3771

3772

3773

3774

3775

3776

3779 return false;

3780 }

3781

3782

3783

3784 if (!CCMatch) {

3787

3788 if (!R || !E)

3789 return false;

3790 }

3791

3792

3793 if (IsVarArg)

3794 return false;

3795

3796

3797

3798 if (IsCalleeStructRet || IsCallerStructRet)

3799 return false;

3800

3801

3802

3803

3804

3805 return true;

3806}

3807

3808

3809

3810

3811

3812

3813

3814

3815

3816

3817

3818

3821 const AttributeList &FuncAttributes) const {

3822 if (Op.size() >= 8 && Op.isAligned(Align(8)))

3823 return MVT::i64;

3824 if (Op.size() >= 4 && Op.isAligned(Align(4)))

3825 return MVT::i32;

3826 if (Op.size() >= 2 && Op.isAligned(Align(2)))

3827 return MVT::i16;

3828 return MVT::Other;

3829}

3830

3835 return false;

3837 if (Subtarget.isHVXVectorType(SVT, true))

3838 return allowsHvxMemoryAccess(SVT, Flags, Fast);

3840 Context, DL, VT, AddrSpace, Alignment, Flags, Fast);

3841}

3842

3845 unsigned *Fast) const {

3847 return false;

3849 if (Subtarget.isHVXVectorType(SVT, true))

3850 return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);

3853 return false;

3854}

3855

3856std::pair<const TargetRegisterClass*, uint8_t>

3858 MVT VT) const {

3862

3864 return std::make_pair(&Hexagon::HvxQRRegClass, 1);

3866 return std::make_pair(&Hexagon::HvxVRRegClass, 1);

3868 return std::make_pair(&Hexagon::HvxWRRegClass, 1);

3869 }

3870

3872}

3873

3876 std::optional ByteOffset) const {

3877

3879 ByteOffset))

3880 return false;

3881

3883 std::pair<SDValue, int> BO = getBaseAndOffset(L->getBasePtr());

3884

3886 return false;

3890 return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);

3891 }

3892 return true;

3893}

3894

3897 AdjustHvxInstrPostInstrSelection(MI, Node);

3898}

3899

3903 unsigned SZ = ValueTy->getPrimitiveSizeInBits();

3904 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");

3905 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked

3906 : Intrinsic::hexagon_L4_loadd_locked;

3907

3909 Builder.CreateIntrinsic(IntID, Addr, nullptr, "larx");

3910

3911 return Builder.CreateBitCast(Call, ValueTy);

3912}

3913

3914

3915

3919 BasicBlock *BB = Builder.GetInsertBlock();

3923

3925 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");

3926 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked

3927 : Intrinsic::hexagon_S4_stored_locked;

3928

3929 Val = Builder.CreateBitCast(Val, CastTy);

3930

3931 Value *Call = Builder.CreateIntrinsic(IntID, {Addr, Val},

3932 nullptr, "stcx");

3933 Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");

3935 return Ext;

3936}

3937

3945

3948

3949 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64

3952}

3953

3959

3962

3963

3965 if (!Mask)

3966 return false;

3967 return Mask->getValue().isPowerOf2();

3968}

3969

3970

3971

3974 if (N->getNumValues() != 1)

3975 return false;

3976 if (N->hasNUsesOfValue(1, 0))

3977 return false;

3978

3979 SDNode *Copy = *N->user_begin();

3980

3981 if (Copy->getOpcode() == ISD::BITCAST) {

3983 }

3984

3986 return false;

3987 }

3988

3989

3990

3991 if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)

3992 return false;

3993

3994

3995 bool HasRet = false;

3996 for (SDNode *Node : Copy->users()) {

3998 return false;

3999 HasRet = true;

4000 }

4001 if (!HasRet)

4002 return false;

4003

4004 Chain = Copy->getOperand(0);

4005 return true;

4006}

unsigned const MachineRegisterInfo * MRI

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

This file implements a class to represent arbitrary precision integral constant values and operations...

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

Function Alias Analysis Results

static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static cl::opt< int > MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memcpy"))

static cl::opt< bool > ConstantLoadsToImm("constant-loads-to-imm", cl::Hidden, cl::init(true), cl::desc("Convert constant loads to immediate values."))

static Value * getUnderLyingObjectForBrevLdIntr(Value *V)

Definition HexagonISelLowering.cpp:2095

static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)

Definition HexagonISelLowering.cpp:121

static cl::opt< bool > AlignLoads("hexagon-align-loads", cl::Hidden, cl::init(false), cl::desc("Rewrite unaligned loads as a pair of aligned loads"))

static bool isBrevLdIntrinsic(const Value *Inst)

Definition HexagonISelLowering.cpp:2040

static cl::opt< int > MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memmove"))

static cl::opt< int > MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memmove"))

static Value * getBrevLdObject(Value *V)

Definition HexagonISelLowering.cpp:2053

static cl::opt< int > MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8), cl::desc("Max #stores to inline memset"))

static cl::opt< bool > DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden, cl::init(false), cl::desc("Disable minimum alignment of 1 for " "arguments passed by value on stack"))

static Value * returnEdge(const PHINode *PN, Value *IntrBaseVal)

Definition HexagonISelLowering.cpp:2065

static cl::opt< int > MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memcpy"))

static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)

CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...

Definition HexagonISelLowering.cpp:239

static cl::opt< int > MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memset"))

static cl::opt< bool > EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden, cl::desc("Control jump table emission on Hexagon target"))

static cl::opt< int > MinimumJumpTables("minimum-jump-tables", cl::Hidden, cl::init(5), cl::desc("Set minimum jump tables"))

static cl::opt< bool > EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden, cl::desc("Enable Hexagon SDNode scheduling"))

#define Hexagon_PointerSize

#define HEXAGON_LRFP_SIZE

#define HEXAGON_GOT_SYM_NAME

Module.h This file contains the declarations for the Module class.

const size_t AbstractManglingParser< Derived, Alloc >::NumOps

const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]

std::pair< MCSymbol *, MachineModuleInfoImpl::StubValueTy > PairTy

Register const TargetRegisterInfo * TRI

Promote Memory to Register

const SmallVectorImpl< MachineOperand > & Cond

This file defines the SmallVector class.

This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...

static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")

static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")

static llvm::Type * getVectorElementType(llvm::Type *Ty)

APInt bitcastToAPInt() const

Class for arbitrary precision integers.

int64_t getSExtValue() const

Get sign extended value.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

ArrayRef< T > drop_front(size_t N=1) const

Drop the first N elements of the array.

size_t size() const

size - Get the array size.

An instruction that atomically checks whether a specified value is in a memory location,...

LLVM Basic Block Representation.

const Function * getParent() const

Return the enclosing method, or null if none.

The address of a basic block.

CCState - This class holds information needed while lowering arguments and return values.

LLVM_ABI void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)

AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...

LLVM_ABI bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)

CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...

LLVM_ABI void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)

AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...

LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)

AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...

uint64_t getStackSize() const

Returns the size of the currently allocated portion of the stack.

LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)

AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...

CCValAssign - Represent assignment of one arg/retval to a location.

Register getLocReg() const

LocInfo getLocInfo() const

int64_t getLocMemOffset() const

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

This class represents a function call, abstracting a target machine's calling convention.

const APFloat & getValueAPF() const

This is the shared class of boolean and integer constants.

bool isZero() const

This is just a convenience method to make client code smaller for a common code.

const APInt & getValue() const

Return the constant as an APInt value reference.

MachineConstantPoolValue * getMachineCPVal() const

bool isMachineConstantPoolEntry() const

const Constant * getConstVal() const

int64_t getSExtValue() const

static LLVM_ABI Constant * get(ArrayRef< Constant * > V)

This is an important base class in LLVM.

A parsed version of the target data layout string in and methods for querying it.

This is the base abstract class for diagnostic reporting in the backend.

Interface for custom diagnostic printing.

bool hasOptSize() const

Optimize this function for size (-Os) or minimum size (-Oz).

CallingConv::ID getCallingConv() const

getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...

bool hasStructRetAttr() const

Determine if the function returns a structure through first or second pointer argument.

int64_t getOffset() const

const GlobalValue * getGlobal() const

Module * getParent()

Get the module that this global value is contained inside of...

LLVM_ABI const GlobalObject * getAliaseeObject() const

Hexagon target-specific information for each MachineFunction.

int getVarArgsFrameIndex()

void setFirstNamedArgFrameIndex(int v)

void setHasEHReturn(bool H=true)

Register getStackRegister() const

Register getFrameRegister(const MachineFunction &MF) const override

const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override

bool isHVXVectorType(EVT VecTy, bool IncludeBool=false) const

unsigned getVectorLength() const

SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override

Returns relocation base for the given PIC jumptable.

Definition HexagonISelLowering.cpp:3595

SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1266

SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1339

bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override

Return if the target supports combining a chain like:

Definition HexagonISelLowering.cpp:3960

SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3097

void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override

This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.

Definition HexagonISelLowering.cpp:3895

bool isTargetCanonicalConstantNode(SDValue Op) const override

Returns true if the given Opc is considered a canonical constant for the target, which should not be ...

Definition HexagonISelLowering.cpp:2217

ConstraintType getConstraintType(StringRef Constraint) const override

Given a constraint, return the type of constraint it is for this target.

Definition HexagonISelLowering.cpp:3608

bool isTruncateFree(Type *Ty1, Type *Ty2) const override

Return true if it's free to truncate a value of type FromTy to type ToTy.

Definition HexagonISelLowering.cpp:2182

MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override

Certain combinations of ABIs, Targets and features require that types are legal for some operations a...

Definition HexagonISelLowering.cpp:205

SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1020

SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3336

SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3281

Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override

Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.

Definition HexagonISelLowering.cpp:3900

bool isLegalICmpImmediate(int64_t Imm) const override

isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...

Definition HexagonISelLowering.cpp:3750

bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset) const override

Return true if it is profitable to reduce a load to a smaller type.

Definition HexagonISelLowering.cpp:3874

bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override

isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...

Definition HexagonISelLowering.cpp:3703

SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:723

AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override

Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.

Definition HexagonISelLowering.cpp:3947

SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, SDValue InGlue, EVT PtrVT, unsigned ReturnReg, unsigned char OperandGlues) const

Definition HexagonISelLowering.cpp:1348

SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override

This hook must be implemented to lower outgoing return values, described by the Outs array,...

Definition HexagonISelLowering.cpp:266

bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override

Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...

Definition HexagonISelLowering.cpp:2117

SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override

This method will be invoked for all target nodes and for any target-independent nodes that the target...

Definition HexagonISelLowering.cpp:3492

SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1485

bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override

Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...

Definition HexagonISelLowering.cpp:686

SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3193

SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:715

SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1087

unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override

Certain targets such as MIPS require that some types such as vectors are always broken down into scal...

Definition HexagonISelLowering.cpp:143

SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override

This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...

Definition HexagonISelLowering.cpp:851

bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override

isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.

Definition HexagonISelLowering.cpp:3684

bool mayBeEmittedAsTailCall(const CallInst *CI) const override

Return true if the target may be able emit the call instruction as a tail call.

Definition HexagonISelLowering.cpp:328

AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override

Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.

Definition HexagonISelLowering.cpp:3939

bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override

Return true if result of the specified node is used by a return node only.

Definition HexagonISelLowering.cpp:3972

SDValue LowerCallResult(SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals, const SmallVectorImpl< SDValue > &OutVals, SDValue Callee) const

LowerCallResult - Lower the result values of an ISD::CALL into the appropriate copies out of appropriate physical registers.

Definition HexagonISelLowering.cpp:411

SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1187

SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1385

SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1449

bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override

Return true if the target supports a memory access of this type for the given address space and align...

Definition HexagonISelLowering.cpp:3831

SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3104

bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override

Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...

Definition HexagonISelLowering.cpp:2203

SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:2485

SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:821

SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:220

SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3125

SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override

This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.

Definition HexagonISelLowering.cpp:3363

bool isShuffleMaskLegal(ArrayRef< int > Mask, EVT VT) const override

Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations, those with specific masks.

Definition HexagonISelLowering.cpp:2222

LegalizeAction getCustomOperationAction(SDNode &Op) const override

How to legalize this custom operation?

Definition HexagonISelLowering.cpp:2254

SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1427

SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1230

bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override

Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail position.

Definition HexagonISelLowering.cpp:3112

SDValue LowerUAddSubOCarry(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3317

bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override

Definition HexagonISelLowering.cpp:2198

bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override

Returns true if it is beneficial to convert a load of a constant to just the constant itself.

Definition HexagonISelLowering.cpp:3691

SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:786

SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1104

SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override

LowerCall - Functions arguments are copied from virtual regs to (physical regs)/(stack frame),...

Definition HexagonISelLowering.cpp:465

bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override

Determine if the target supports unaligned memory accesses.

Definition HexagonISelLowering.cpp:3843

const char * getTargetNodeName(unsigned Opcode) const override

This method returns the name of a target specific DAG node.

Definition HexagonISelLowering.cpp:1925

SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3163

SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:771

SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3082

void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override

This callback is invoked when a node result type is illegal for the target, and the operation was registered to use 'custom' lowering for that result type.

Definition HexagonISelLowering.cpp:3462

Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override

Perform a store-conditional operation to Addr.

Definition HexagonISelLowering.cpp:3916

bool hasBitTest(SDValue X, SDValue Y) const override

Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...

Definition HexagonISelLowering.cpp:2178

HexagonTargetLowering(const TargetMachine &TM, const HexagonSubtarget &ST)

Definition HexagonISelLowering.cpp:1505

SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:2967

bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override

Return true if folding a constant offset with the given GlobalAddress is legal.

Definition HexagonISelLowering.cpp:3741

bool IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SmallVectorImpl< ISD::InputArg > &Ins, SelectionDAG &DAG) const

IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.

Definition HexagonISelLowering.cpp:3757

SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1164

void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override

This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.

Definition HexagonISelLowering.cpp:3431

SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3021

SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:2438

SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:806

SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1243

AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override

Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.

Definition HexagonISelLowering.cpp:3955

std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override

Given a physical register constraint (e.g.

Definition HexagonISelLowering.cpp:3626

SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:2492

bool isFMAFasterThanFMulAndFAdd(const MachineFunction &, EVT) const override

Return true if an FMA operation is faster than a pair of mul and add instructions.

Definition HexagonISelLowering.cpp:2192

SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:3090

EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override

Returns the target specific optimal type for load and store operations as a result of memset,...

Definition HexagonISelLowering.cpp:3819

SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:2276

LegalizeTypeAction getPreferredVectorAction(MVT VT) const override

Return the preferred vector type legalization action.

Definition HexagonISelLowering.cpp:2228

bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override

This hook should be implemented to check whether the return values described by the Outs array can fi...

Definition HexagonISelLowering.cpp:250

SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1289

Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override

Return the register ID of the name passed in.

Definition HexagonISelLowering.cpp:333

std::pair< MVT, unsigned > handleMaskRegisterForCallingConv(const HexagonSubtarget &Subtarget, EVT VT) const

Definition HexagonISelLowering.cpp:184

SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1323

SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:1283

SDValue LowerREADSTEADYCOUNTER(SDValue Op, SelectionDAG &DAG) const

Definition HexagonISelLowering.cpp:798

Common base class shared among various IRBuilders.

ConstantInt * getInt8(uint8_t C)

Get a constant 8-bit value.

This provides a uniform API for creating instructions and inserting them into a basic block: either a...

Class to represent integer types.

static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)

This static method is the primary way of constructing an IntegerType.

This is an important class for using LLVM in a threaded context.

LLVM_ABI void diagnose(const DiagnosticInfo &DI)

Report a message to the currently installed diagnostic handler.

Base class for LoadSDNode and StoreSDNode.

ISD::MemIndexedMode getAddressingMode() const

Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...

bool isUnindexed() const

Return true if this is NOT a pre/post inc/dec load/store.

bool isIndexed() const

Return true if this is a pre/post inc/dec load/store.

An instruction for reading from memory.

This class is used to represent ISD::LOAD nodes.

const SDValue & getBasePtr() const

const SDValue & getOffset() const

ISD::LoadExtType getExtensionType() const

Return whether this is a plain node, or one of the varieties of value-extending loads.

@ INVALID_SIMPLE_VALUE_TYPE

unsigned getVectorMinNumElements() const

Given a vector type, return the minimum number of elements it contains.

unsigned getVectorNumElements() const

bool isVector() const

Return true if this is a vector value type.

bool isInteger() const

Return true if this is an integer or a vector integer type.

bool isScalableVector() const

Return true if this is a vector value type where the runtime length is machine dependent.

static LLVM_ABI MVT getVT(Type *Ty, bool HandleUnknown=false)

Return the value type corresponding to the specified type.

static auto integer_valuetypes()

TypeSize getSizeInBits() const

Returns the size of the specified MVT in bits.

static auto fixedlen_vector_valuetypes()

bool isScalarInteger() const

Return true if this is an integer, not including vectors.

TypeSize getStoreSizeInBits() const

Return the number of bits overwritten by a store of the specified value type.

static MVT getVectorVT(MVT VT, unsigned NumElements)

MVT getVectorElementType() const

bool isFloatingPoint() const

Return true if this is a FP or a vector FP type.

static MVT getIntegerVT(unsigned BitWidth)

static auto fp_valuetypes()

LLVM_ABI void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const

The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.

LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)

Create a new object at a fixed location on the stack.

void setAdjustsStack(bool V)

LLVM_ABI void ensureMaxAlignment(Align Alignment)

Make sure the function is at least Align bytes aligned.

void setFrameAddressIsTaken(bool T)

void setHasTailCall(bool V=true)

void setReturnAddressIsTaken(bool s)

unsigned getNumFixedObjects() const

Return the number of fixed objects.

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

const DataLayout & getDataLayout() const

Return the DataLayout attached to the Module associated to this MF.

Function & getFunction()

Return the LLVM function that this machine code represents.

Ty * getInfo()

getInfo - Keep track of various per-function pieces of information for backends that would like to do...

Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)

addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...

Representation of each machine instruction.

A description of a memory reference used in the backend.

Flags

Flags values. These may be or'd together.

@ MOVolatile

The memory access is volatile.

@ MOLoad

The memory access reads data.

@ MOStore

The memory access writes data.

Flags getFlags() const

Return the raw flags of the source value,.

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

const MDNode * getRanges() const

Returns the Ranges that describes the dereference.

AAMDNodes getAAInfo() const

Returns the AA info that describes the dereference.

MachineMemOperand * getMemOperand() const

Return a MachineMemOperand object describing the memory reference performed by operation.

const MachinePointerInfo & getPointerInfo() const

const SDValue & getChain() const

EVT getMemoryVT() const

Return the type of the in-memory value.

A Module instance is used to store all the information related to an LLVM module.

MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...

unsigned getOpcode() const

Return the opcode for this Instruction or ConstantExpr.

BasicBlock * getIncomingBlock(unsigned i) const

Return incoming basic block number i.

Value * getIncomingValue(unsigned i) const

Return incoming value number x.

unsigned getNumIncomingValues() const

Return the number of incoming edges.

Wrapper class representing virtual and physical registers.

Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...

const DebugLoc & getDebugLoc() const

Represents one node in the SelectionDAG.

EVT getValueType(unsigned ResNo) const

Return the type of a specified result.

Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.

SDNode * getNode() const

get the SDNode which holds the desired result

SDValue getValue(unsigned R) const

EVT getValueType() const

Return the ValueType of the referenced return value.

const SDValue & getOperand(unsigned i) const

unsigned getOpcode() const

This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...

SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)

const TargetSubtargetInfo & getSubtarget() const

SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)

LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)

Create a MERGE_VALUES node from the given operands.

LLVM_ABI SDVTList getVTList(EVT VT)

Return an SDVTList that represents the list of values specified.

LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)

SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)

Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...

LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)

Create a ConstantFPSDNode wrapping a constant value.

LLVM_ABI SDValue getRegister(Register Reg, EVT VT)

LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)

Loads are not normal binary operators: their result type is not determined by their operands,...

SDValue getGLOBAL_OFFSET_TABLE(EVT VT)

Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.

LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)

SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)

SDValue getUNDEF(EVT VT)

Return an UNDEF node. UNDEF does not have a useful SDLoc.

SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)

Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).

LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)

Return a bitcast using the SDLoc of the value operand, and casting to the provided type.

SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)

SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())

Helper function to make it easier to build Select's if you just have operands and don't want to check...

LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)

Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.

const DataLayout & getDataLayout() const

LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

Create a ConstantSDNode wrapping a constant value.

LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())

Returns sum of the base pointer and offset.

LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

Helper function to build ISD::STORE nodes.

SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)

Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...

LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)

Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...

LLVM_ABI SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)

LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)

Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...

LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)

LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)

Gets or creates the specified node.

SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)

LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)

Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.

MachineFunction & getMachineFunction() const

LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)

LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)

LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)

Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...

LLVMContext * getContext() const

LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)

SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)

SDValue getEntryNode() const

Return the token chain corresponding to the entry of the function.

LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)

Return an ISD::VECTOR_SHUFFLE node.

LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)

Create a logical NOT operation as (XOR Val, BooleanOne).

static void commuteMask(MutableArrayRef< int > Mask)

Change values in a shuffle permute mask assuming the two vector operands have swapped position.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

This class is used to represent ISD::STORE nodes.

const SDValue & getBasePtr() const

const SDValue & getOffset() const

const SDValue & getValue() const

StringRef - Represent a constant reference to a string, i.e.

constexpr size_t size() const

size - Get the string size.

A switch()-like statement whose cases are string literals.

StringSwitch & Case(StringLiteral S, T Value)

void setBooleanVectorContents(BooleanContent Ty)

Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...

void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)

Indicate that the specified operation does not work with the specified type and indicate what to do a...

virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const

Return true if it is profitable to reduce a load to a smaller type.

LegalizeAction

This enum indicates whether operations are valid for a target, and if not, what action should be used...

unsigned MaxStoresPerMemcpyOptSize

Likewise for functions with the OptSize attribute.

virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const

Return the register class that should be used for the specified value type.

const TargetMachine & getTargetMachine() const

virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const

Certain combinations of ABIs, Targets and features require that types are legal for some operations a...

LegalizeTypeAction

This enum indicates whether a types are legal for a target, and if not, what action should be used to...

void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)

Indicate that the specified indexed load does or does not work with the specified type and indicate w...

void setPrefLoopAlignment(Align Alignment)

Set the target's preferred loop alignment.

void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)

Set the maximum atomic operation size supported by the backend.

virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const

Certain targets such as MIPS require that some types such as vectors are always broken down into scal...

void setMinFunctionAlignment(Align Alignment)

Set the target's minimum function alignment.

unsigned MaxStoresPerMemsetOptSize

Likewise for functions with the OptSize attribute.

void setBooleanContents(BooleanContent Ty)

Specify how the target extends the result of integer and floating point boolean values from i1 to a w...

unsigned MaxStoresPerMemmove

Specify maximum number of store instructions per memmove call.

void computeRegisterProperties(const TargetRegisterInfo *TRI)

Once all of the register classes are added, this allows us to compute derived properties we expose.

unsigned MaxStoresPerMemmoveOptSize

Likewise for functions with the OptSize attribute.

void addRegisterClass(MVT VT, const TargetRegisterClass *RC)

Add the specified register class as an available regclass for the specified value type.

void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)

Indicate that the specified indexed store does or does not work with the specified type and indicate ...

virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const

Return the pointer type for the given address space, defaults to the pointer type from the data layou...

void setPrefFunctionAlignment(Align Alignment)

Set the target's preferred function alignment.

unsigned MaxStoresPerMemset

Specify maximum number of store instructions per memset call.

void setMinimumJumpTableEntries(unsigned Val)

Indicate the minimum number of blocks to generate jump tables.

void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified truncating store does not work with the specified type and indicate what ...

@ UndefinedBooleanContent

bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const

Return true if the specified operation is legal on this target or can be made legal with custom lower...

virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const

Return true if the target supports a memory access of this type for the given address space and align...

void setMinCmpXchgSizeInBits(unsigned SizeInBits)

Sets the minimum cmpxchg or ll/sc size supported by the backend.

void setStackPointerRegisterToSaveRestore(Register R)

If set to a physical register, this specifies the register that llvm.savestack/llvm....

void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)

If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...

AtomicExpansionKind

Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.

void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)

Indicate that the specified condition code is or isn't supported on the target and indicate what to d...

virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const

Return the largest legal super-reg register class of the register class for the specified type and it...

void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)

Targets should invoke this method for each target independent node that they want to provide a custom...

void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified load with extension does not work with the specified type and indicate wh...

bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const

This function returns true if the memory access is aligned or if the target allows this specific unal...

unsigned MaxStoresPerMemcpy

Specify maximum number of store instructions per memcpy call.

void setSchedulingPreference(Sched::Preference Pref)

Specify the target scheduling preference.

virtual bool isTargetCanonicalConstantNode(SDValue Op) const

Returns true if the given Opc is considered a canonical constant for the target, which should not be ...

SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const

Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.

virtual ConstraintType getConstraintType(StringRef Constraint) const

Given a constraint, return the type of constraint it is for this target.

std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const

Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.

bool isPositionIndependent() const

virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const

Given a physical register constraint (e.g.

TargetLowering(const TargetLowering &)=delete

virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const

This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...

Primary interface to the complete machine description for the target machine.

bool shouldAssumeDSOLocal(const GlobalValue *GV) const

unsigned getID() const

Return the register class ID number.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

static constexpr TypeSize getFixed(ScalarTy ExactSize)

The instances of the Type class are immutable: once they are created, they are never changed.

static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)

LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY

Return the basic size of this type if it is a primitive type.

bool isIntegerTy() const

True if this is an instance of IntegerType.

static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)

Value * getOperand(unsigned i) const

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

const ParentTy * getParent() const

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

Abstract Attribute helper functions.

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

unsigned ID

LLVM IR allows to use arbitrary numbers as calling convention identifiers.

@ Fast

Attempts to make calls as fast as possible (e.g.

@ C

The default llvm calling convention, compatible with C.

@ MO_PCREL

MO_PCREL - On a symbol operand, indicates a PC-relative relocation Used for computing a global addres...

@ MO_GOT

MO_GOT - Indicates a GOT-relative relocation.

@ SETCC

SetCC operator - This evaluates to a true value iff the condition is true.

@ SMUL_LOHI

SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.

@ INSERT_SUBVECTOR

INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.

@ BSWAP

Byte Swap and Counting operators.

@ ADD

Simple integer binary arithmetic operators.

@ ANY_EXTEND

ANY_EXTEND - Used for integer types. The high bits are undefined.

@ FMA

FMA - Perform a * b + c with no intermediate rounding step.

@ INTRINSIC_VOID

OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.

@ SINT_TO_FP

[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.

@ CONCAT_VECTORS

CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value, with length equal to the sum of the lengths of the input vectors.

@ FADD

Simple binary floating point operators.

@ ABS

ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.

@ SDIVREM

SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.

@ BUILD_PAIR

BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.

@ EH_RETURN

OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...

@ SIGN_EXTEND

Conversion operators.

@ SCALAR_TO_VECTOR

SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the resultant vector type.

@ SSUBO

Same for subtraction.

@ SELECT

Select(COND, TRUEVAL, FALSEVAL).

@ SPLAT_VECTOR

SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.

@ SADDO

RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.

@ SHL

Shift and rotation operations.

@ VECTOR_SHUFFLE

VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.

@ EXTRACT_SUBVECTOR

EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.

@ EXTRACT_VECTOR_ELT

EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.

@ CopyToReg

CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.

@ ZERO_EXTEND

ZERO_EXTEND - Used for integer types, zeroing the new bits.

@ SELECT_CC

Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on a boolean result of comparing the lhs and rhs (ops #0 and #1) of a conditional expression with the condition code in op #4.

@ SIGN_EXTEND_INREG

SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register).

@ SMIN

[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.

@ GLOBAL_OFFSET_TABLE

The address of the GOT.

@ VSELECT

Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...

@ UADDO_CARRY

Carry-using nodes for multiple precision addition and subtraction.

@ FRAMEADDR

FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.

@ FP_TO_SINT

FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.

@ AND

Bitwise operators - logical and, logical or, logical xor.

@ INTRINSIC_WO_CHAIN

RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.

@ INSERT_VECTOR_ELT

INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.

@ TokenFactor

TokenFactor - This node takes multiple tokens as input and produces a single token result.

@ TRUNCATE

TRUNCATE - Completely drop the high bits.

@ SHL_PARTS

SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.

@ AssertSext

AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.

@ FCOPYSIGN

FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.

@ SADDSAT

RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...

@ INTRINSIC_W_CHAIN

RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function that returns a result and has side effects.

@ BUILD_VECTOR

BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...

MemIndexedMode

MemIndexedMode enum - This enum defines the load / store indexed addressing modes.

CondCode

ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...

LoadExtType

LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).

This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.

initializer< Ty > init(const Ty &Val)

This is an optimization pass for GlobalISel generic memory operations.

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)

Get the size of a range.

constexpr bool isInt(int64_t x)

Checks if an integer fits into the given bit width.

LLVM_ABI bool isNullConstant(SDValue V)

Returns true if V is a constant integer zero.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

bool isAligned(Align Lhs, uint64_t SizeInBytes)

Checks that SizeInBytes is a multiple of the alignment.

LLVM_ABI Value * getSplatValue(const Value *V)

Get splat value if the input is a splat vector or return nullptr.

int countr_zero(T Val)

Count number of 0's from the least significant bit to the most stopping at the first 1.

auto dyn_cast_or_null(const Y &Val)

constexpr bool isPowerOf2_32(uint32_t Value)

Return true if the argument is a power of two > 0.

LLVM_ABI raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector&lt;T, 0&gt;).

FormattedNumber format_hex(uint64_t N, unsigned Width, bool Upper=false)

format_hex - Output N as a fixed width hexadecimal.

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

LLVM_ABI raw_fd_ostream & errs()

This returns a reference to a raw_ostream for standard error.

AtomicOrdering

Atomic ordering for LLVM's memory model.

@ First

Helpers to iterate all locations in the MemoryEffectsBase class.

uint16_t MCPhysReg

An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.

DWARFExpression::Operation Op

LLVM_ABI int getNextAvailablePluginDiagnosticKind()

Get the next available kind ID for a plugin diagnostic.

unsigned M0(unsigned Val)

constexpr unsigned BitWidth

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

auto find_if(R &&Range, UnaryPredicate P)

Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.

unsigned Log2(Align A)

Returns the log2 of the alignment.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This struct is a compact representation of a valid (non-zero power of two) alignment.

constexpr uint64_t value() const

This is a hole in the type system and should not be abused.

bool isSimple() const

Test if the given EVT is simple (as opposed to being extended).

bool bitsGT(EVT VT) const

Return true if this has more bits than VT.

TypeSize getSizeInBits() const

Return the size of the specified value type in bits.

bool isPow2VectorType() const

Returns true if the given vector is a power of 2.

static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)

Return the value type corresponding to the specified type.

MVT getSimpleVT() const

Return the SimpleValueType held in the specified simple EVT.

bool isVector() const

Return true if this is a vector value type.

EVT getVectorElementType() const

Given a vector type, return the type of each element.

unsigned getVectorNumElements() const

Given a vector type, return the number of elements it contains.

This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.

static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)

Stack pointer relative access.

static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

This represents a list of ValueType's that has been intern'd by a SelectionDAG.

This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.

This structure contains all information that is necessary for lowering calls.

SmallVector< ISD::InputArg, 32 > Ins

SmallVector< ISD::OutputArg, 32 > Outs

SmallVector< SDValue, 32 > OutVals

bool isBeforeLegalizeOps() const