LLVM: lib/Target/NVPTX/NVPTXISelLowering.cpp Source File

1//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation --------===//

2//

3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.

4// See https://llvm.org/LICENSE.txt for license information.

5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

6//

7//===----------------------------------------------------------------------===//

8//

9// This file defines the interfaces that NVPTX uses to lower LLVM code into a

10// selection DAG.

11//

12//===----------------------------------------------------------------------===//

13

47#include "llvm/IR/IntrinsicsNVPTX.h"

60#include <algorithm>

61#include <cmath>

62#include <cstdint>

63#include <iterator>

64#include <optional>

65#include <sstream>

66#include <string>

67#include <utility>

68#include <vector>

69

70#define DEBUG_TYPE "nvptx-lower"

71

72using namespace llvm;

73
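// Command-line knobs for NVPTX-specific lowering. These control scheduling
// for register pressure, the FMA contraction level, how f32 division and
// square root are lowered (approximate vs. IEEE-compliant), whether log2 may
// use lg2.approx, and the minimum alignment of byval device-function
// parameters.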

75

77 "nvptx-sched4reg",

78 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));

79

82 cl::desc("NVPTX Specific: FMA contraction (0: don't do it,"

83 " 1: do it, 2: do it aggressively)"),

85

88 cl::desc("NVPTX Specific: 0 use div.approx, 1 use div.full, 2 use"

89 " IEEE Compliant F32 div.rnd if available."),

91

94 cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),

96

97

98

100 "nvptx-approx-log2f32",

101 cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),

103

105 "nvptx-force-min-byval-param-align", cl::Hidden,

106 cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval"

107 " params of device functions."),

109

112

114 } else {

115

117 return 0;

118 else

119 return 2;

120 }

121}

122

125

127 } else {

128

130 }

131}

132

136}

137

140 default:

141 return false;

142 case MVT::v2i1:

143 case MVT::v4i1:

144 case MVT::v2i8:

145 case MVT::v4i8:

146 case MVT::v8i8:

147 case MVT::v16i8:

148 case MVT::v2i16:

149 case MVT::v4i16:

150 case MVT::v8i16:

151 case MVT::v2i32:

152 case MVT::v4i32:

153 case MVT::v2i64:

154 case MVT::v2f16:

155 case MVT::v4f16:

156 case MVT::v8f16:

157 case MVT::v2bf16:

158 case MVT::v4bf16:

159 case MVT::v8bf16:

160 case MVT::v2f32:

161 case MVT::v4f32:

162 case MVT::v2f64:

163 return true;

164 }

165}

166
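// Returns true for the 16-bit scalar types (f16, bf16, i16) that PTX can
// pack pairwise into a single 32-bit register.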

167static bool Is16bitsType(MVT VT) {
168 return (VT.SimpleTy == MVT::f16 || VT.SimpleTy == MVT::bf16 ||

169 VT.SimpleTy == MVT::i16);
170}

171

172

173

174

175

176

177

178
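// Determines whether a vector load/store should be custom-lowered and, if
// so, what shape to lower it to: returns std::nullopt when the type is not
// handled here, otherwise the (element count, element type) pair for the
// lowered access. Vectors of eight or more sub-32-bit elements are re-packed
// into 32-bit words (e.g. v8f16 -> v4 of v2f16) so the access fits PTX's
// widest vector operation, v4.b32.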

179static std::optional<std::pair<unsigned int, EVT>>

180getVectorLoweringShape(EVT VectorEVT) {
181 if (!VectorEVT.isSimple())
182 return std::nullopt;

183

186

187

188

189

191 default:

192 return std::nullopt;

193 case MVT::v2i8:

194 case MVT::v2i16:

195 case MVT::v2i32:

196 case MVT::v2i64:

197 case MVT::v2f16:

198 case MVT::v2bf16:

199 case MVT::v2f32:

200 case MVT::v2f64:

201 case MVT::v4i8:

202 case MVT::v4i16:

203 case MVT::v4i32:

204 case MVT::v4f16:

205 case MVT::v4bf16:

206 case MVT::v4f32:

207

208 return std::pair(NumElts, EltVT);

209 case MVT::v8i8:

210 case MVT::v8f16:

211 case MVT::v8bf16:

212 case MVT::v8i16:

213 case MVT::v16i8:

214

215

216

217

218

219

220

221

222

223

225

226 return std::pair(NumElts / NPerWord,

228 }

229

231}

232

233

234

235

236

237

238
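// ComputePTXValueVTs - computes the EVTs that make up a value of type Ty,
// along with the byte offset of each piece. Unlike the generic
// ComputeValueVTs, i128 is split into two i64 halves, structs and arrays are
// flattened recursively, and neighboring 16-bit (and i8) scalars are packed
// into v2f16/v2bf16/v2i16/v4i8 so they can be loaded and stored as one unit.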

242 uint64_t StartingOffset = 0) {

245

246

250

251 if (Offsets) {

252 Offsets->push_back(StartingOffset + 0);

253 Offsets->push_back(StartingOffset + 8);

254 }

255

256 return;

257 }

258

259

260 if (StructType *STy = dyn_cast<StructType>(Ty)) {

261 auto const *SL = DL.getStructLayout(STy);

262 auto ElementNum = 0;

263 for (auto *EI : STy->elements()) {

265 StartingOffset + SL->getElementOffset(ElementNum));

266 ++ElementNum;

267 }

268 return;

269 }

270

271

272 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {

273 Type *EltTy = ATy->getElementType();

274 uint64_t EltSize = DL.getTypeAllocSize(EltTy);

275 for (int I : llvm::seq<int>(ATy->getNumElements()))

276 ComputePTXValueVTs(TLI, DL, EltTy, ValueVTs, Offsets, StartingOffset + I * EltSize);

277 return;

278 }

279

280 ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);

281 for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {

282 EVT VT = TempVTs[i];

283 uint64_t Off = TempOffsets[i];

284

285

289

290

291

292

295

296

297

299 case MVT::f16:

300 EltVT = MVT::v2f16;

301 break;

302 case MVT::bf16:

303 EltVT = MVT::v2bf16;

304 break;

305 case MVT::i16:

306 EltVT = MVT::v2i16;

307 break;

308 default:

310 }

311 NumElts /= 2;

312 } else if (EltVT.getSimpleVT() == MVT::i8 &&

314 NumElts == 3)) {

315

316 EltVT = MVT::v4i8;

317 NumElts = (NumElts + 3) / 4;

318 } else if (EltVT.getSimpleVT() == MVT::i8 && NumElts == 2) {

319

320 NumElts = 1;

321 EltVT = MVT::v2i16;

322 }

323 for (unsigned j = 0; j != NumElts; ++j) {

325 if (Offsets)

326 Offsets->push_back(Off + j * EltVT.getStoreSize());

327 }

328 } else {

330 if (Offsets)

331 Offsets->push_back(Off);

332 }

333 }

334}

335

336

337

338

339

340
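// PromoteScalarIntegerPTX - makes scalar integers suitable for passing as
// parameters/returns: 1-bit values stay i1, widths up to 8 bits promote to
// i8, and 16/32/64-bit values keep their width. Returns true (and sets
// *PromotedVT) only when the type actually changes.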

344 default:

346 "Promotion is not suitable for scalars of size larger than 64-bits");

347 case 1:

348 *PromotedVT = MVT::i1;

349 break;

350 case 2:

351 case 4:

352 case 8:

353 *PromotedVT = MVT::i8;

354 break;

355 case 16:

356 *PromotedVT = MVT::i16;

357 break;

358 case 32:

359 *PromotedVT = MVT::i32;

360 break;

361 case 64:

362 *PromotedVT = MVT::i64;

363 break;

364 }

365 return EVT(*PromotedVT) != VT;

366 }

367 return false;

368}

369

370

371

372

373

374

375

376

377

378

379
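// CanMergeParamLoadStoresStartingAt - checks whether AccessSize bytes of a
// flattened parameter, starting at ValueVTs[Idx], can be merged into one
// vector access. Returns the number of elements covered (2 or 4), or 1 when
// merging is impossible: the pieces must share one type, be contiguous in
// memory, and the access must not be misaligned or run past the end of the
// value.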

383

384

385 if (ParamAlignment < AccessSize)

386 return 1;

387

388 if (Offsets[Idx] & (AccessSize - 1))

389 return 1;

390

391 EVT EltVT = ValueVTs[Idx];

393

394

395 if (EltSize >= AccessSize)

396 return 1;

397

398 unsigned NumElts = AccessSize / EltSize;

399

400 if (AccessSize != EltSize * NumElts)

401 return 1;

402

403

404 if (Idx + NumElts > ValueVTs.size())

405 return 1;

406

407

408 if (NumElts != 4 && NumElts != 2)

409 return 1;

410

411 for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {

412

413 if (ValueVTs[j] != EltVT)

414 return 1;

415

416

417 if (Offsets[j] - Offsets[j - 1] != EltSize)

418 return 1;

419 }

420

421 return NumElts;

422}

423

424

425
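// Flags recording how each piece of a flattened parameter participates in a
// vectorized access: a standalone scalar, or the first / inner / last
// element of a vector load/store.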

427 PVF_INNER = 0x0,

428 PVF_FIRST = 0x1,

429 PVF_LAST = 0x2,

430

433

434

435

436

437

438

439

440

441
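// VectorizePTXValueVTs - computes one PVF_* flag per element of a flattened
// parameter, greedily trying the widest access first (16, 8, 4, then 2
// bytes). Vectorization is skipped entirely for variadic arguments.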

445 Align ParamAlignment, bool IsVAArg = false) {

446

447

450

451 if (IsVAArg)

452 return VectorInfo;

453

454

455 for (int I = 0, E = ValueVTs.size(); I != E; ++I) {

456

457 assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");

458 for (unsigned AccessSize : {16, 8, 4, 2}) {

460 I, AccessSize, ValueVTs, Offsets, ParamAlignment);

461

462 switch (NumElts) {

463 default:

465 case 1:

466

467 continue;

468 case 2:

469 assert(I + 1 < E && "Not enough elements.");

472 I += 1;

473 break;

474 case 4:

475 assert(I + 3 < E && "Not enough elements.");

480 I += 3;

481 break;

482 }

483

484

485 break;

486 }

487 }

488 return VectorInfo;

489}

490

493 if (Value->getValueType(0) == VT)

496}

497

498
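// The NVPTXTargetLowering constructor declares, for every value type, which
// operations are Legal, Promote, Expand, or Custom. The helper lambdas below
// gate f16, bf16, and packed-i16x2 operations on the subtarget's SM and PTX
// versions, substituting the supplied fallback action when hardware support
// is missing.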

502

503

504

508

511

512

513

515

516

517

519

520

523 else

525

526 auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,

529 switch (Op) {

530

538 break;

541 break;

542 }

544 };

545

546 auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,

550 Op, VT, IsOpSupported ? Action : NoBF16Action);

551 };

552

553 auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,

555 bool IsOpSupported = false;

556

557 switch (Op) {

564 break;

565 }

567 };

568

581

582

587

591

594

595

600

605

606

611

616

617

619

620

621

639

640

641 for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,

642 MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, MVT::v4i8,

643 MVT::i32, MVT::i64}) {

646 }

647

648

649

656

663

666

668 {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},

670

673

675

678

681

682

683

685

686

706

707

713

714

717

723 }

724

729

730

734

735

740

743

744

746

748

749

755 }

756 }

757

758

763

764

766

767 for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {

773

776 }

777

785

792

793

798

808 }

809

814

815

817

818

821

822

826

827

828

831

832

833

834

835

836

837

839 setFP16OperationAction(Op, MVT::f16, Legal, Promote);

840 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);

841 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);

842

843 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);

846 }

847

848

850 for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {

853 }

854 }

855 }

856

857

858 const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&

861 for (const auto &VT : {MVT::f16, MVT::v2f16})

863 IsFP16FP16x2NegAvailable ? Legal : Expand);

864

867

868

869

877 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);

880 }

881

884 }

886 for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {

889 }

890 }

891

892

893

895 for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {

899 }

903 }

904

912

913

920

921

922

923

924 for (const auto &Op :

933 }

934

939 } else {

942 }

947

951 setFP16OperationAction(Op, MVT::f16, Legal, Promote);

952 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);

953 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);

954 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);

957 }

958 bool SupportsF32MinMaxNaN =

962 setFP16OperationAction(Op, MVT::f16, Legal, Expand);

963 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);

964 setBF16OperationAction(Op, MVT::bf16, Legal, Expand);

965 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);

966 }

967

968

971

972

973

974

975

976

982

983

984

990 }

991

992

993

994

995

997

1001}

1002

1004

1005#define MAKE_CASE(V) \

1006 case V: \

1007 return #V;

1008

1011 break;

1012

1075 }

1076 return nullptr;

1077

1078#undef MAKE_CASE

1079}

1080

1087}

1088
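// getSqrtEstimate - lowers sqrt/rsqrt to the NVPTX approximation intrinsics
// when fast math permits: rsqrt.approx for reciprocal square roots,
// sqrt.approx for plain f32 square roots, and, since there is no
// sqrt.approx.f64, a reciprocal of rsqrt.approx.d for f64.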

1090 int Enabled, int &ExtraSteps,

1091 bool &UseOneConst,

1092 bool Reciprocal) const {

1096

1098 ExtraSteps = 0;

1099

1103

1104 auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {

1107 };

1108

1109

1110

1111

1112

1113 if (Reciprocal || ExtraSteps > 0) {

1114 if (VT == MVT::f32)

1115 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f

1116 : Intrinsic::nvvm_rsqrt_approx_f);

1117 else if (VT == MVT::f64)

1118 return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);

1119 else

1121 } else {

1122 if (VT == MVT::f32)

1123 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f

1124 : Intrinsic::nvvm_sqrt_approx_f);

1125 else {

1126

1127

1128

1129

1132 DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),

1133 MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));

1134 }

1135 }

1136}

1137

1145}

1146

1150}

1151
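// getPrototype - builds the ".callprototype" string that PTX requires for
// indirect calls, describing the return value and every parameter (type,
// size, and alignment) of the callee.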

1155 std::optional<std::pair<unsigned, const APInt &>> VAInfo,

1156 const CallBase &CB, unsigned UniqueCallSite) const {

1158

1160 assert(isABI && "Non-ABI compilation is not supported");

1161 if (!isABI)

1162 return "";

1163

1164 std::string Prototype;

1166 O << "prototype_" << UniqueCallSite << " : .callprototype ";

1167

1169 O << "()";

1170 } else {

1171 O << "(";

1174 unsigned size = 0;

1175 if (auto *ITy = dyn_cast<IntegerType>(retTy)) {

1176 size = ITy->getBitWidth();

1177 } else {

1179 "Floating point type expected here");

1181 }

1182

1183

1184

1186

1187 O << ".param .b" << size << " _";

1188 } else if (isa<PointerType>(retTy)) {

1189 O << ".param .b" << PtrVT.getSizeInBits() << " _";

1191 O << ".param .align " << (retAlignment ? retAlignment->value() : 0)

1192 << " .b8 _[" << DL.getTypeAllocSize(retTy) << "]";

1193 } else {

1195 }

1196 O << ") ";

1197 }

1198 O << "_ (";

1199

1200 bool first = true;

1201

1202 unsigned NumArgs = VAInfo ? VAInfo->first : Args.size();

1203 for (unsigned i = 0, OIdx = 0; i != NumArgs; ++i, ++OIdx) {

1204 Type *Ty = Args[i].Ty;

1205 if (!first) {

1206 O << ", ";

1207 }

1208 first = false;

1209

1210 if (!Outs[OIdx].Flags.isByVal()) {

1212 Align ParamAlign =

1214 O << ".param .align " << ParamAlign.value() << " .b8 ";

1215 O << "_";

1216 O << "[" << DL.getTypeAllocSize(Ty) << "]";

1217

1220 if (unsigned len = vtparts.size())

1221 OIdx += len - 1;

1222 continue;

1223 }

1224

1226 (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&

1227 "type mismatch between callee prototype and arguments");

1228

1229 unsigned sz = 0;

1230 if (isa<IntegerType>(Ty)) {

1231 sz = cast<IntegerType>(Ty)->getBitWidth();

1233 } else if (isa<PointerType>(Ty)) {

1234 sz = PtrVT.getSizeInBits();

1235 } else {

1237 }

1238 O << ".param .b" << sz << " ";

1239 O << "_";

1240 continue;

1241 }

1242

1243

1244

1245 Type *ETy = Args[i].IndirectType;

1246 Align InitialAlign = Outs[OIdx].Flags.getNonZeroByValAlign();

1247 Align ParamByValAlign =

1249

1250 O << ".param .align " << ParamByValAlign.value() << " .b8 ";

1251 O << "_";

1252 O << "[" << Outs[OIdx].Flags.getByValSize() << "]";

1253 }

1254

1255 if (VAInfo)

1256 O << (first ? "" : ",") << " .param .align " << VAInfo->second

1257 << " .b8 _[]\n";

1258 O << ")";

1260 O << " .noreturn";

1261 O << ";";

1262

1263 return Prototype;

1264}

1265

1269}

1270

1271Align NVPTXTargetLowering::getArgumentAlignment(const CallBase *CB, Type *Ty,

1272 unsigned Idx,

1274 if (!CB) {

1275

1276 return DL.getABITypeAlign(Ty);

1277 }

1278

1280

1281 if (!DirectCallee) {

1282

1283

1284

1285

1286 if (const auto *CI = dyn_cast<CallInst>(CB)) {

1287

1289 return StackAlign.value();

1290 }

1292 }

1293

1294

1295

1296 if (DirectCallee)

1298

1299

1300 return DL.getABITypeAlign(Ty);

1301}

1302

1304 switch (ElementType.getSimpleVT().SimpleTy) {

1305 default:

1306 return false;

1307 case MVT::f16:

1308 case MVT::bf16:

1309 ElementType = MVT::i16;

1310 return true;

1311 case MVT::f32:

1312 case MVT::v2f16:

1313 case MVT::v2bf16:

1314 ElementType = MVT::i32;

1315 return true;

1316 case MVT::f64:

1317 ElementType = MVT::i64;

1318 return true;

1319 }

1320}

1321

1322

1323

1324

1325

1329 unsigned ArgID, const SDLoc &dl) {

1330

1333

1334

1336 for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {

1337

1340 SDValue StoreOperands[] = {Chain, DAG.getConstant(ArgID, dl, MVT::i32),

1342 ShiftVal, InGlue};

1343

1344

1345

1350 }

1351 return Chain;

1352}

1353

1354

1355

1360 const SDLoc &dl) {

1361

1362 EVT MergedType = ElementType;

1364

1365

1367

1368 SDVTList LoadVTs = DAG.getVTList(MVT::i16, MVT::Other, MVT::Glue);

1369 for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {

1372 InGlue};

1373

1380

1383 TempProxyRegOps.push_back(TmpLdVal);

1384

1387

1389

1390

1391 TmpLdVal = DAG.getNode(ISD::AND, dl, MergedType, TmpLdVal, CMask);

1392

1393 TmpLdVal = DAG.getNode(ISD::SHL, dl, MergedType, TmpLdVal, CShift);

1394 RetVal = DAG.getNode(ISD::OR, dl, MergedType, RetVal, TmpLdVal);

1395 }

1396 if (ElementType != MergedType)

1398

1399 return RetVal;

1400}

1401
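// A direct callee whose IR function type disagrees with the call site's
// function type cannot be called directly in PTX; such calls are emitted as
// indirect calls instead.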

1404 if (!Func)

1405 return false;

1406 if (auto *CalleeFunc = dyn_cast<Function>(Func->getGlobal()))

1407 return CB->getFunctionType() != CalleeFunc->getFunctionType();

1408 return false;

1409}

1410

1413

1416 "Support for variadic functions (unsized array parameter) introduced "

1417 "in PTX ISA version 6.0 and requires target sm_30.");

1418

1431

1433 assert(isABI && "Non-ABI compilation is not supported");

1434 if (!isABI)

1435 return Chain;

1436

1437

1438

1439

1440

1441

1442

1443

1444

1445

1446

1447

1448

1449

1450

1451

1452

1453 SDValue VADeclareParam;

1454 unsigned FirstVAArg = CLI.NumFixedArgs;

1455 unsigned VAOffset = 0;

1456

1458 SDValue TempChain = Chain;

1461

1462 unsigned ParamCount = 0;

1463

1464

1465

1466

1467

1468

1469

1470

1471

1472 unsigned OIdx = 0;

1473

1474

1475 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {

1476 EVT VT = Outs[OIdx].VT;

1477 Type *Ty = Args[i].Ty;

1479 bool IsByVal = Outs[OIdx].Flags.isByVal();

1480

1483

1484 assert((!IsByVal || Args[i].IndirectType) &&

1485 "byval arg must have indirect type");

1486 Type *ETy = (IsByVal ? Args[i].IndirectType : Ty);

1488

1490 if (IsByVal) {

1491

1492

1493

1494 Align InitialAlign = Outs[OIdx].Flags.getNonZeroByValAlign();

1496 InitialAlign, DL);

1497 if (IsVAArg)

1498 VAOffset = alignTo(VAOffset, ArgAlign);

1499 } else {

1500 ArgAlign = getArgumentAlignment(CB, Ty, ParamCount + 1, DL);

1501 }

1502

1504 (IsByVal ? Outs[OIdx].Flags.getByValSize() : DL.getTypeAllocSize(Ty));

1505 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);

1506

1507 bool NeedAlign;

1509 if (IsVAArg) {

1510 if (ParamCount == FirstVAArg) {

1511 SDValue DeclareParamOps[] = {

1513 DAG.getConstant(ParamCount, dl, MVT::i32),

1514 DAG.getConstant(1, dl, MVT::i32), InGlue};

1516 DeclareParamVTs, DeclareParamOps);

1517 }

1518 NeedAlign = PassAsArray;

1519 } else if (PassAsArray) {

1520

1521 SDValue DeclareParamOps[] = {

1523 DAG.getConstant(ParamCount, dl, MVT::i32),

1526 DeclareParamOps);

1527 NeedAlign = true;

1528 } else {

1529

1531

1532

1533

1535 }

1536 SDValue DeclareScalarParamOps[] = {

1537 Chain, DAG.getConstant(ParamCount, dl, MVT::i32),

1539 DAG.getConstant(0, dl, MVT::i32), InGlue};

1541 DeclareScalarParamOps);

1542 NeedAlign = false;

1543 }

1545

1546

1547

1548

1549

1550 bool ExtendIntegerParam =

1551 Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;

1552

1555 for (unsigned j = 0, je = VTs.size(); j != je; ++j) {

1556 EVT EltVT = VTs[j];

1557 int CurOffset = Offsets[j];

1559 if (NeedAlign)

1561

1562 SDValue StVal = OutVals[OIdx];

1563

1564 MVT PromotedVT;

1566 EltVT = EVT(PromotedVT);

1567 }

1571 StVal = DAG.getNode(Ext, dl, PromotedVT, StVal);

1572 }

1573

1574 if (IsByVal) {

1579 PartAlign);

1580 } else if (ExtendIntegerParam) {

1581 assert(VTs.size() == 1 && "Scalar can't have multiple parts.");

1582

1585 dl, MVT::i32, StVal);

1586 }

1587

1588 if (!ExtendIntegerParam && EltVT.getSizeInBits() < 16) {

1589

1590

1592 }

1593

1594

1595

1596 if (VectorInfo[j] == PVF_SCALAR && !IsVAArg && PartAlign.has_value() &&

1597 PartAlign.value() <

1599 assert(StoreOperands.empty() && "Unfinished preceding store.");

1601 DAG, Chain, IsByVal ? CurOffset + VAOffset : CurOffset, EltVT,

1602 StVal, InGlue, ParamCount, dl);

1603

1604

1605

1606 if (!IsByVal)

1607 ++OIdx;

1608 continue;

1609 }

1610

1611

1612 if (VectorInfo[j] & PVF_FIRST) {

1613 assert(StoreOperands.empty() && "Unfinished preceding store.");

1616 DAG.getConstant(IsVAArg ? FirstVAArg : ParamCount, dl, MVT::i32));

1617

1619 IsByVal ? CurOffset + VAOffset : (IsVAArg ? VAOffset : CurOffset),

1620 dl, MVT::i32));

1621 }

1622

1623

1625

1626 if (VectorInfo[j] & PVF_LAST) {

1627 unsigned NumElts = StoreOperands.size() - 3;

1629 switch (NumElts) {

1630 case 1:

1632 break;

1633 case 2:

1635 break;

1636 case 4:

1638 break;

1639 default:

1641 }

1642

1643 StoreOperands.push_back(InGlue);

1644

1645

1646

1647 EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;

1648

1650 Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,

1654

1655

1656 StoreOperands.clear();

1657

1658

1659

1660 if (!IsByVal && IsVAArg) {

1661 assert(NumElts == 1 &&

1662 "Vectorization is expected to be disabled for variadics.");

1663 VAOffset += DL.getTypeAllocSize(

1665 }

1666 }

1667 if (!IsByVal)

1668 ++OIdx;

1669 }

1670 assert(StoreOperands.empty() && "Unfinished parameter store.");

1671 if (!IsByVal && VTs.size() > 0)

1672 --OIdx;

1673 ++ParamCount;

1674 if (IsByVal && IsVAArg)

1676 }

1677

1678 GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());

1679 MaybeAlign retAlignment = std::nullopt;

1680

1681

1682 if (Ins.size() > 0) {

1685

1686

1687

1688

1689 unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);

1693 SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),

1694 DAG.getConstant(resultsz, dl, MVT::i32),

1695 DAG.getConstant(0, dl, MVT::i32), InGlue };

1697 DeclareRetOps);

1699 } else {

1700 retAlignment = getArgumentAlignment(CB, RetTy, 0, DL);

1701 assert(retAlignment && "retAlignment is guaranteed to be set");

1703 SDValue DeclareRetOps[] = {

1704 Chain, DAG.getConstant(retAlignment->value(), dl, MVT::i32),

1705 DAG.getConstant(resultsz / 8, dl, MVT::i32),

1706 DAG.getConstant(0, dl, MVT::i32), InGlue};

1708 DeclareRetOps);

1710 }

1711 }

1712

1714

1715

1716 if (HasVAArgs) {

1717 SDValue DeclareParamOps[] = {

1722 VADeclareParam->getVTList(), DeclareParamOps);

1723 }

1724

1725

1726

1728

1729

1730

1731

1732 bool isIndirectCall = (!Func && CB) || ConvertToIndirectCall;

1733

1734 if (isa<ExternalSymbolSDNode>(Callee)) {

1735 Function *CalleeFunc = nullptr;

1736

1737

1739 assert(CalleeFunc != nullptr && "Libcall callee must be set.");

1740

1741

1742

1743 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");

1744 }

1745

1747

1748

1749

1750

1751

1752

1753

1756 DL, RetTy, Args, Outs, retAlignment,

1757 HasVAArgs

1758 ? std::optional<std::pair<unsigned, const APInt &>>(std::make_pair(

1760 : std::nullopt,

1761 *CB, UniqueCallSite);

1764 Chain,

1766 InGlue,

1767 };

1770 }

1771

1773 SDValue PrintCallOps[] = {

1774 Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InGlue

1775 };

1776

1781 Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);

1783

1784 if (ConvertToIndirectCall) {

1785

1786

1787 EVT DestVT = Callee.getValueType();

1790 unsigned DestReg =

1793 Callee = DAG.getCopyFromReg(RegCopy, dl, DestReg, DestVT);

1794 }

1795

1796

1798 SDValue CallVoidOps[] = { Chain, Callee, InGlue };

1801

1802

1803 SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);

1804 SDValue CallArgBeginOps[] = { Chain, InGlue };

1806 CallArgBeginOps);

1808

1809 for (unsigned i = 0, e = std::min(CLI.NumFixedArgs + 1, ParamCount); i != e;

1810 ++i) {

1811 unsigned opcode;

1812 if (i == (e - 1))

1814 else

1818 DAG.getConstant(i, dl, MVT::i32), InGlue };

1819 Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);

1821 }

1823 SDValue CallArgEndOps[] = { Chain,

1825 InGlue };

1828

1831 SDValue PrototypeOps[] = {

1832 Chain, DAG.getConstant(UniqueCallSite, dl, MVT::i32), InGlue};

1835 }

1836

1839

1840

1841

1843

1844

1845

1847

1848

1849 if (Ins.size() > 0) {

1853 assert(VTs.size() == Ins.size() && "Bad value decomposition");

1854

1855 Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);

1857

1859 int VecIdx = -1;

1860

1861

1862

1863

1864 bool ExtendIntegerRetVal =

1865 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;

1866

1867 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {

1868 bool needTruncate = false;

1869 EVT TheLoadType = VTs[i];

1870 EVT EltType = Ins[i].VT;

1872 MVT PromotedVT;

1873

1875 TheLoadType = EVT(PromotedVT);

1876 EltType = EVT(PromotedVT);

1877 needTruncate = true;

1878 }

1879

1880 if (ExtendIntegerRetVal) {

1881 TheLoadType = MVT::i32;

1882 EltType = MVT::i32;

1883 needTruncate = true;

1885 if (VTs[i].isInteger())

1886 needTruncate = true;

1887 EltType = MVT::i16;

1888 }

1889

1890

1891

1892 if (VectorInfo[i] == PVF_SCALAR && RetTy->isAggregateType() &&

1893 EltAlign < DL.getABITypeAlign(

1895 assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");

1897 DAG, Chain, Offsets[i], TheLoadType, InGlue, TempProxyRegOps, dl);

1899 ProxyRegTruncates.push_back(std::optional<MVT>());

1902

1903 continue;

1904 }

1905

1906

1907 if (VectorInfo[i] & PVF_FIRST) {

1908 assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");

1909 VecIdx = i;

1910 }

1911

1913

1914 if (VectorInfo[i] & PVF_LAST) {

1915 unsigned NumElts = LoadVTs.size();

1919 switch (NumElts) {

1920 case 1:

1922 break;

1923 case 2:

1925 break;

1926 case 4:

1928 break;

1929 default:

1931 }

1932

1933 SDValue LoadOperands[] = {

1934 Chain, DAG.getConstant(1, dl, MVT::i32),

1935 DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InGlue};

1937 Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,

1940

1941 for (unsigned j = 0; j < NumElts; ++j) {

1943

1944 if (needTruncate)

1945 ProxyRegTruncates.push_back(std::optional<MVT>(Ins[VecIdx + j].VT));

1946 else

1947 ProxyRegTruncates.push_back(std::optional<MVT>());

1948 }

1949

1950 Chain = RetVal.getValue(NumElts);

1951 InGlue = RetVal.getValue(NumElts + 1);

1952

1953

1954 VecIdx = -1;

1955 LoadVTs.clear();

1956 }

1957 }

1958 }

1959

1960 Chain =

1961 DAG.getCALLSEQ_END(Chain, UniqueCallSite, UniqueCallSite + 1, InGlue, dl);

1963

1964

1965

1966

1967 for (unsigned i = 0; i < ProxyRegOps.size(); ++i) {

1968 if (i < RetElts.size() && RetElts[i]) {

1970 continue;

1971 }

1972

1975 DAG.getVTList(ProxyRegOps[i].getSimpleValueType(), MVT::Other, MVT::Glue),

1976 { Chain, ProxyRegOps[i], InGlue }

1977 );

1978

1979 Chain = Ret.getValue(1);

1980 InGlue = Ret.getValue(2);

1981

1982 if (ProxyRegTruncates[i]) {

1984 }

1985

1987 }

1988

1989 for (SDValue &T : TempProxyRegOps) {

1992 DAG.getVTList(T.getSimpleValueType(), MVT::Other, MVT::Glue),

1993 {Chain, T.getOperand(0), InGlue});

1996

1999 }

2000

2001

2002

2003 isTailCall = false;

2004 return Chain;

2005}

2006
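// Dynamic alloca, stacksave, and stackrestore (below) require PTX ISA 7.3
// and sm_52; on older targets a diagnostic is emitted and a trivial value is
// returned instead.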

2009

2012

2014 Fn,

2015 "Support for dynamic alloca introduced in PTX ISA version 7.3 and "

2016 "requires target sm_52.",

2020 Op.getOperand(0)};

2022 }

2023

2024 SDValue Chain = Op.getOperand(0);

2026 uint64_t Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();

2028

2029

2030 MVT ValueSizeTy = nvTM->is64Bit() ? MVT::i64 : MVT::i32;

2031

2034 EVT RetTypes[] = {ValueSizeTy, MVT::Other};

2036}

2037

2043

2045 Fn,

2046 "Support for stackrestore requires PTX ISA version >= 7.3 and target "

2047 ">= sm_52.",

2048 DL.getDebugLoc());

2050 return Op.getOperand(0);

2051 }

2052

2054 SDValue Chain = Op.getOperand(0);

2059}

2060

2066

2068 Fn,

2069 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "

2070 "sm_52.",

2071 DL.getDebugLoc());

2073 auto Ops = {DAG.getConstant(0, DL, Op.getValueType()), Op.getOperand(0)};

2075 }

2076

2078 SDValue Chain = Op.getOperand(0);

2084}

2085

2086

2087

2088

2091 SDNode *Node = Op.getNode();

2094 unsigned NumOperands = Node->getNumOperands();

2095 for (unsigned i = 0; i < NumOperands; ++i) {

2096 SDValue SubOp = Node->getOperand(i);

2100 for (unsigned j = 0; j < NumSubElem; ++j) {

2103 }

2104 }

2106}

2107

2109

2110

2111 EVT FromVT = Op->getOperand(0)->getValueType(0);

2112 if (FromVT != MVT::v2i8) {

2113 return Op;

2114 }

2115

2116

2127 {Extend0, DAG.getNode(ISD::SHL, DL, MVT::i16, {Extend1, Const8})});

2128 EVT ToVT = Op->getValueType(0);

2130}

2131

2132

2133

2134

2135

2138 EVT VT = Op->getValueType(0);

2139 if (!(Isv2x16VT(VT) || VT == MVT::v4i8))

2140 return Op;

2142

2144 return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||

2145 isa<ConstantFPSDNode>(Operand);

2146 })) {

2147 if (VT != MVT::v4i8)

2148 return Op;

2149

2150

2155 if (Cast) {

2158 }

2163 };

2164 auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);

2165 auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);

2166 auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);

2168 }

2169

2170

2172 const SDValue &Operand = Op->getOperand(N);

2173 EVT VT = Op->getValueType(0);

2175 return APInt(32, 0);

2177 if (VT == MVT::v2f16 || VT == MVT::v2bf16)

2178 Value = cast<ConstantFPSDNode>(Operand)->getValueAPF().bitcastToAPInt();

2179 else if (VT == MVT::v2i16 || VT == MVT::v4i8)

2181 else

2183

2184

2185 if (VT == MVT::v4i8)

2187 return Value.zext(32);

2188 };

2191 Value = GetOperand(Op, 0) | GetOperand(Op, 1).shl(16);

2192 } else if (VT == MVT::v4i8) {

2193 Value = GetOperand(Op, 0) | GetOperand(Op, 1).shl(8) |

2194 GetOperand(Op, 2).shl(16) | GetOperand(Op, 3).shl(24);

2195 } else {

2197 }

2200}

2201

2202SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,

2207 EVT VectorVT = Vector.getValueType();

2208

2209 if (VectorVT == MVT::v4i8) {

2218 }

2219

2220

2221 if (isa<ConstantSDNode>(Index.getNode()))

2222 return Op;

2223

2224

2225 assert(Isv2x16VT(VectorVT) && "Unexpected vector type.");

2227

2235}

2236

2237SDValue NVPTXTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,

2240 EVT VectorVT = Vector.getValueType();

2241

2242 if (VectorVT != MVT::v4i8)

2243 return Op;

2246 if (Value->isUndef())

2248

2250

2259}

2260
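// v4i8 shuffles lower to a single PRMT (byte permute); the 4-bit selector
// for each destination byte is assembled from the shuffle mask below.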

2265 if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)

2266 return Op;

2267

2268

2273 if (I.value() != -1)

2274 Selector |= (I.value() << (I.index() * 4));

2275 }

2276

2281}

2282

2283

2284

2285

2286
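// 64-bit values live in pairs of 32-bit registers, so 64-bit shifts are
// lowered in two halves: sm_35+ targets use the funnel-shift (SHF) clamp
// instructions, while older targets expand to a shift/select sequence.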

2287SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,

2291

2292 EVT VT = Op.getValueType();

2295 SDValue ShOpLo = Op.getOperand(0);

2296 SDValue ShOpHi = Op.getOperand(1);

2297 SDValue ShAmt = Op.getOperand(2);

2299

2300 if (VTBits == 32 && STI.getSmVersion() >= 35) {

2301

2302

2303

2304

2305

2309

2312 }

2313 else {

2314

2315

2316

2317

2318

2319

2320

2321

2324 ShAmt);

2331

2337

2340 }

2341}

2342

2343

2344

2345

2346

2347

2352

2353 EVT VT = Op.getValueType();

2356 SDValue ShOpLo = Op.getOperand(0);

2357 SDValue ShOpHi = Op.getOperand(1);

2358 SDValue ShAmt = Op.getOperand(2);

2359

2360 if (VTBits == 32 && STI.getSmVersion() >= 35) {

2361

2362

2363

2364

2365

2369

2372 }

2373 else {

2374

2375

2376

2377

2378

2379

2380

2381

2384 ShAmt);

2391

2397

2400 }

2401}

2402

2403

2404

2407 EVT VT = Op.getValueType();

2409

2413

2414 if (!SrcVT.bitsEq(VT))

2416

2418}

2419

2421 EVT VT = Op.getValueType();

2422

2423 if (VT == MVT::f32)

2424 return LowerFROUND32(Op, DAG);

2425

2426 if (VT == MVT::f64)

2427 return LowerFROUND64(Op, DAG);

2428

2430}

2431

2432

2433

2434

2435

2436

2437

2438
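// FROUND (round half away from zero) has no PTX instruction, so it is
// open-coded as trunc(A + copysign(0.5, A)), with fix-ups for inputs whose
// magnitude is too large to have a fractional part and for |A| < 0.5, which
// rounds to copysign(0, A).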

2443 EVT VT = Op.getValueType();

2444

2446

2447

2449 const unsigned SignBitMask = 0x80000000;

2451 DAG.getConstant(SignBitMask, SL, MVT::i32));

2452 const unsigned PointFiveInBits = 0x3F000000;

2453 SDValue PointFiveWithSignRaw =

2455 DAG.getConstant(PointFiveInBits, SL, MVT::i32));

2456 SDValue PointFiveWithSign =

2460

2461

2467

2468

2472 return DAG.getNode(ISD::SELECT, SL, VT, IsSmall, RoundedAForSmallA, RoundedA);

2473}

2474

2475

2476

2477

2478

2479

2484 EVT VT = Op.getValueType();

2485

2487

2488

2492

2493

2499 RoundedA);

2500

2501

2504

2505

2510}

2511

2513 EVT VT = N->getValueType(0);

2514 EVT NVT = MVT::f32;

2517 }

2521 SDValue Res = DAG.getNode(N->getOpcode(), DL, NVT, Tmp0, Tmp1, N->getFlags());

2523}

2524

2525SDValue NVPTXTargetLowering::PromoteBinOpIfF32FTZ(SDValue Op,

2529 }

2530 return Op;

2531}

2532

2536

2537 if (Op.getValueType() == MVT::bf16) {

2541 DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),

2543 }

2544

2545

2546 return Op;

2547}

2548

2552

2553 if (Op.getOperand(0).getValueType() == MVT::bf16) {

2556 Op.getOpcode(), Loc, Op.getValueType(),

2558 }

2559

2560

2561 return Op;

2562}

2563

2566 EVT NarrowVT = Op.getValueType();

2567 SDValue Wide = Op.getOperand(0);

2573 }

2575

2578 return Op;

2579 }

2582

2583

2586 : MVT::f32,

2587 Wide, Loc, DAG);

2589 }

2590 }

2592 }

2593 }

2594

2595

2596 return Op;

2597}

2598

2601 SDValue Narrow = Op.getOperand(0);

2603 EVT WideVT = Op.getValueType();

2609 }

2613 : MVT::f32;

2617 } else {

2619 }

2621 }

2622 }

2623

2624

2625 return Op;

2626}

2627

2630 if (Op.getValueType() != MVT::v2i16)

2631 return Op;

2632 EVT EltVT = Op.getValueType().getVectorElementType();

2634 for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {

2637 [&](const SDUse &O) {

2638 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,

2639 O.get(), DAG.getIntPtrConstant(I, DL));

2640 });

2642 }

2645 return V;

2646}

2647

2650 switch (Op.getOpcode()) {

2658 return Op;

2660 return LowerBUILD_VECTOR(Op, DAG);

2662 return LowerBITCAST(Op, DAG);

2664 return Op;

2666 return LowerEXTRACT_VECTOR_ELT(Op, DAG);

2668 return LowerINSERT_VECTOR_ELT(Op, DAG);

2670 return LowerVECTOR_SHUFFLE(Op, DAG);

2672 return LowerCONCAT_VECTORS(Op, DAG);

2674 return LowerSTORE(Op, DAG);

2676 return LowerLOAD(Op, DAG);

2678 return LowerShiftLeftParts(Op, DAG);

2681 return LowerShiftRightParts(Op, DAG);

2683 return LowerSelect(Op, DAG);

2685 return LowerFROUND(Op, DAG);

2687 return LowerFCOPYSIGN(Op, DAG);

2690 return LowerINT_TO_FP(Op, DAG);

2693 return LowerFP_TO_INT(Op, DAG);

2695 return LowerFP_ROUND(Op, DAG);

2697 return LowerFP_EXTEND(Op, DAG);

2699 return LowerBR_JT(Op, DAG);

2701 return LowerVAARG(Op, DAG);

2703 return LowerVASTART(Op, DAG);

2723 return LowerCopyToReg_128(Op, DAG);

2727

2728 return PromoteBinOpIfF32FTZ(Op, DAG);

2729

2730 default:

2731 llvm_unreachable("Custom lowering not defined for operation");

2732 }

2733}

2734

2737 SDValue Chain = Op.getOperand(0);

2738 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));

2739 SDValue Index = Op.getOperand(2);

2740

2741 unsigned JId = JT->getIndex();

2744

2746

2747

2750

2751

2756

2757

2761

2762 return BrxEnd;

2763}

2764

2765

2768}

2769

2770

2771
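// va_arg lowering: load the current pointer from the va_list, align it for
// the requested type, load the argument, then store the incremented pointer
// back to the va_list.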

2775

2776 SDNode *Node = Op.getNode();

2777 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();

2778 EVT VT = Node->getValueType(0);

2780 SDValue Tmp1 = Node->getOperand(0);

2781 SDValue Tmp2 = Node->getOperand(1);

2782 const MaybeAlign MA(Node->getConstantOperandVal(3));

2783

2786 SDValue VAList = VAListLoad;

2787

2792

2796 }

2797

2798

2802

2803

2806

2809

2810

2812}

2813

2818

2819

2820 SDValue Arg = getParamSymbol(DAG, -1, PtrVT);

2822

2823 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

2824 return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),

2826}

2827

2829 SDValue Op0 = Op->getOperand(0);

2830 SDValue Op1 = Op->getOperand(1);

2831 SDValue Op2 = Op->getOperand(2);

2833

2834 assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");

2835

2840

2841 return Trunc;

2842}

2843

2845 if (Op.getValueType() == MVT::i1)

2846 return LowerLOADi1(Op, DAG);

2847

2848

2849

2850 EVT VT = Op.getValueType();

2851 if (Isv2x16VT(VT) || VT == MVT::v4i8) {

2853 EVT MemVT = Load->getMemoryVT();

2855 MemVT, *Load->getMemOperand())) {

2859 }

2860 }

2861

2863}

2864

2865

2866

2867

2868
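// i1 values occupy one byte in memory: i1 loads are emitted as i8
// zero-extending loads followed by a truncate, and i1 stores (below) as
// truncating i8 stores.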

2874 assert(Node->getValueType(0) == MVT::i1 &&

2875 "Custom lowering for i1 load only");

2877 LD->getBasePtr(), LD->getPointerInfo(),

2878 MVT::i8, LD->getAlign(),

2879 LD->getMemOperand()->getFlags());

2881

2882

2883

2884 SDValue Ops[] = { result, LD->getChain() };

2886}

2887

2890 EVT VT = Store->getMemoryVT();

2891

2892 if (VT == MVT::i1)

2893 return LowerSTOREi1(Op, DAG);

2894

2895

2896

2897 if ((Isv2x16VT(VT) || VT == MVT::v4i8) &&

2899 VT, *Store->getMemOperand()))

2901

2902

2903 if (Isv2x16VT(VT) || VT == MVT::v4i8)

2905

2907 return LowerSTOREVector(Op, DAG);

2908

2910}

2911
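// LowerSTOREVector - splits a vector store into an NVPTXISD StoreV2/StoreV4
// node when type and alignment allow, extracting the elements (or 32-bit
// packed sub-vectors) and extending sub-16-bit elements to i16 as PTX
// requires.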

2915 SDValue Val = N->getOperand(1);

2918

2920 if (!NumEltsAndEltVT)

2922 auto [NumElts, EltVT] = NumEltsAndEltVT.value();

2923

2924 MemSDNode *MemSD = cast<MemSDNode>(N);

2926

2929 if (Alignment < PrefAlign) {

2930

2931

2932

2933

2934

2936 }

2937

2938

2939

2940

2941 bool NeedExt = false;

2943 NeedExt = true;

2944

2945 unsigned Opcode = 0;

2946 switch (NumElts) {

2947 default:

2949 case 2:

2951 break;

2952 case 4:

2954 break;

2955 }

2956

2958

2959

2961

2962

2964 "NumElts should not increase, only decrease or stay the same.");

2966

2967

2970

2971

2973 for (unsigned i = 0; i < NumElts; ++i) {

2976 NumEltsPerSubVector);

2979 }

2980 } else {

2981 for (unsigned i = 0; i < NumElts; ++i) {

2984 if (NeedExt)

2987 }

2988 }

2989

2990

2991 Ops.append(N->op_begin() + 2, N->op_end());

2992

2996

2997

2998 return NewSt;

2999}

3000

3001

3002

3003

3004

3010 SDValue Tmp2 = ST->getBasePtr();

3012 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");

3015 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,

3016 ST->getAlign(), ST->getMemOperand()->getFlags());

3018}

3019

3022

3023

3024

3025 assert(Op.getOperand(1).getValueType() == MVT::i128 &&

3026 "Custom lowering for 128-bit CopyToReg only");

3027

3030

3036

3039

3040 NewOps[0] = Op->getOperand(0);

3041 NewOps[1] = Op->getOperand(1);

3042 NewOps[2] = Lo;

3043 NewOps[3] = Hi;

3045 NewOps[4] = Op->getOperand(3);

3046

3048}

3049

3050unsigned NVPTXTargetLowering::getNumRegisters(

3052 std::optional<MVT> RegisterVT = std::nullopt) const {

3053 if (VT == MVT::i128 && RegisterVT == MVT::i128)

3054 return 1;

3056}

3057

3058bool NVPTXTargetLowering::splitValueIntoRegisterParts(

3060 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {

3061 if (Val.getValueType() == MVT::i128 && NumParts == 1) {

3062 Parts[0] = Val;

3063 return true;

3064 }

3065 return false;

3066}

3067

3068

3069

3070

3071

3073 EVT v) const {

3077}

3078

3086

3090
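// LowerFormalArguments - incoming arguments live in the .param space: each
// IR argument is flattened with ComputePTXValueVTs, re-vectorized where
// profitable, and loaded from its parameter symbol; unused arguments are
// skipped.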

3092 std::vector<SDValue> OutChains;

3093

3095 assert(isABI && "Non-ABI compilation is not supported");

3096 if (!isABI)

3097 return Chain;

3098

3099 std::vector<Type *> argTypes;

3100 std::vector<const Argument *> theArgs;

3101 for (const Argument &I : F->args()) {

3102 theArgs.push_back(&I);

3103 argTypes.push_back(I.getType());

3104 }

3105

3106

3107

3108

3109

3110

3111

3112

3113

3114 unsigned InsIdx = 0;

3115

3116 for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++InsIdx) {

3117 Type *Ty = argTypes[i];

3118

3119 if (theArgs[i]->use_empty()) {

3120

3123

3125 if (vtparts.empty())

3127

3128 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;

3129 ++parti) {

3131 ++InsIdx;

3132 }

3133 if (vtparts.size() > 0)

3134 --InsIdx;

3135 continue;

3136 }

3139 unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);

3140 for (unsigned parti = 0; parti < NumRegs; ++parti) {

3142 ++InsIdx;

3143 }

3144 if (NumRegs > 0)

3145 --InsIdx;

3146 continue;

3147 }

3149 continue;

3150 }

3151

3152

3153

3154

3155

3156 if (!PAL.hasParamAttr(i, Attribute::ByVal)) {

3157 bool aggregateIsPacked = false;

3158 if (StructType *STy = dyn_cast<StructType>(Ty))

3159 aggregateIsPacked = STy->isPacked();

3160

3164 if (VTs.empty())

3166

3170

3171 SDValue Arg = getParamSymbol(DAG, i, PtrVT);

3172 int VecIdx = -1;

3173 for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {

3174 if (VectorInfo[parti] & PVF_FIRST) {

3175 assert(VecIdx == -1 && "Orphaned vector.");

3176 VecIdx = parti;

3177 }

3178

3179

3180 if (VectorInfo[parti] & PVF_LAST) {

3181 unsigned NumElts = parti - VecIdx + 1;

3182 EVT EltVT = VTs[parti];

3183

3184 EVT LoadVT = EltVT;

3185 if (EltVT == MVT::i1)

3186 LoadVT = MVT::i8;

3187 else if (Isv2x16VT(EltVT) || EltVT == MVT::v4i8)

3188

3189

3190

3191 LoadVT = MVT::i32;

3192

3196 DAG.getConstant(Offsets[VecIdx], dl, PtrVT));

3199

3201 if (aggregateIsPacked)

3202 return Align(1);

3203 if (NumElts != 1)

3204 return std::nullopt;

3205 Align PartAlign =

3208 }();

3213 if (P.getNode())

3214 P.getNode()->setIROrder(i + 1);

3215 for (unsigned j = 0; j < NumElts; ++j) {

3218

3219 if (EltVT == MVT::i1)

3221

3222 else if (EltVT != LoadVT)

3224

3225

3226 MVT PromotedVT;

3229 }

3230

3231

3232

3233 if (Ins[InsIdx].VT.isInteger() &&

3234 Ins[InsIdx].VT.getFixedSizeInBits() >

3236 unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND

3238 Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);

3239 }

3241 }

3242

3243

3244 VecIdx = -1;

3245 }

3246 ++InsIdx;

3247 }

3248 if (VTs.size() > 0)

3249 --InsIdx;

3250 continue;

3251 }

3252

3253

3254

3255

3256

3257

3258

3259

3261 assert(ObjectVT == Ins[InsIdx].VT &&

3262 "Ins type did not match function type");

3263 SDValue Arg = getParamSymbol(DAG, i, PtrVT);

3265 if (p.getNode())

3266 p.getNode()->setIROrder(i + 1);

3268 }

3269

3270 if (!OutChains.empty())

3272

3273 return Chain;

3274}

3275

3276

3277
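// LowerReturn - return values are written back to the return parameter
// space with StoreRetval nodes (vectorized to V2/V4 where possible), after
// promoting odd-sized scalars and extending sub-32-bit integers.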

3281

3284

3285

3286 for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {

3287

3291 ShiftVal};

3292

3293

3294

3296 DAG.getVTList(MVT::Other), StoreOperands,

3299 }

3300 return Chain;

3301}

3302

3305 bool isVarArg,

3312

3314 assert(isABI && "Non-ABI compilation is not supported");

3315 if (!isABI)

3316 return Chain;

3317

3323 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");

3324

3325 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {

3326 SDValue PromotedOutVal = OutVals[i];

3327 MVT PromotedVT;

3329 VTs[i] = EVT(PromotedVT);

3330 }

3334 PromotedOutVal = DAG.getNode(Ext, dl, PromotedVT, PromotedOutVal);

3335 }

3336 PromotedOutVals.push_back(PromotedOutVal);

3337 }

3338

3340 VTs, Offsets,

3343

3344

3345

3346

3347 bool ExtendIntegerRetVal =

3348 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;

3349

3351 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {

3352 SDValue OutVal = OutVals[i];

3353 SDValue RetVal = PromotedOutVals[i];

3354

3355 if (ExtendIntegerRetVal) {

3358 dl, MVT::i32, RetVal);

3360

3361

3363 }

3364

3365

3366

3367 if (VectorInfo[i] == PVF_SCALAR && RetTy->isAggregateType()) {

3368 EVT ElementType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];

3369 Align ElementTypeAlign =

3370 DL.getABITypeAlign(ElementType.getTypeForEVT(RetTy->getContext()));

3371 Align ElementAlign =

3373 if (ElementAlign < ElementTypeAlign) {

3374 assert(StoreOperands.empty() && "Orphaned operand list.");

3376 RetVal, dl);

3377

3378

3379

3380 continue;

3381 }

3382 }

3383

3384

3385 if (VectorInfo[i] & PVF_FIRST) {

3386 assert(StoreOperands.empty() && "Orphaned operand list.");

3389 }

3390

3391

3392 StoreOperands.push_back(RetVal);

3393

3394

3395 if (VectorInfo[i] & PVF_LAST) {

3397 unsigned NumElts = StoreOperands.size() - 2;

3398 switch (NumElts) {

3399 case 1:

3401 break;

3402 case 2:

3404 break;

3405 case 4:

3407 break;

3408 default:

3410 }

3411

3412

3413

3414 EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];

3416 Op, dl, DAG.getVTList(MVT::Other), StoreOperands, TheStoreType,

3418

3419 StoreOperands.clear();

3420 }

3421 }

3422

3424}

3425

3429 if (Constraint.size() > 1)

3430 return;

3432}

3433

3434

3435

3436

3437

3438
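// getTgtMemIntrinsic - records the memory value type, base pointer, and
// access flags for NVVM intrinsics that touch memory (WMMA fragment
// loads/stores, ldmatrix, scoped atomics, ldu/ldg) so generic analyses can
// reason about them.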

3442 switch (Intrinsic) {

3443 default:

3444 return false;

3445 case Intrinsic::nvvm_match_all_sync_i32p:

3446 case Intrinsic::nvvm_match_all_sync_i64p:

3448

3449

3450

3451 Info.memVT = MVT::i1;

3452

3453

3455 return true;

3456 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:

3457 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:

3458 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:

3459 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:

3460 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:

3461 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:

3462 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:

3463 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:

3464 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:

3465 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:

3466 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:

3467 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:

3468 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:

3469 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:

3470 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:

3471 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:

3472 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:

3473 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:

3474 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:

3475 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:

3476 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:

3477 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:

3478 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:

3479 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {

3481 Info.memVT = MVT::v8f16;

3482 Info.ptrVal = I.getArgOperand(0);

3483 Info.offset = 0;

3486 return true;

3487 }

3488 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:

3489 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:

3490 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:

3491 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:

3492 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:

3493 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:

3494 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:

3495 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:

3496 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:

3497 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:

3498 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:

3499 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:

3500 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:

3501 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:

3502 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:

3503 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:

3504 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:

3505 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:

3506 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:

3507 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:

3508 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:

3509 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:

3510 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:

3511 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {

3513 Info.memVT = MVT::v2i32;

3514 Info.ptrVal = I.getArgOperand(0);

3515 Info.offset = 0;

3518 return true;

3519 }

3520

3521 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:

3522 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:

3523 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:

3524 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:

3525 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:

3526 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:

3527 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:

3528 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:

3529 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:

3530 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:

3531 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:

3532 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:

3533 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:

3534 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:

3535 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:

3536 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:

3537

3538 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:

3539 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:

3540 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:

3541 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:

3542 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:

3543 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:

3544 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:

3545 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:

3546 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:

3547 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:

3548 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:

3549 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:

3550 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:

3551 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:

3552 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:

3553 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:

3554 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:

3555 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16: {

3557 Info.memVT = MVT::v4i32;

3558 Info.ptrVal = I.getArgOperand(0);

3559 Info.offset = 0;

3562 return true;

3563 }

3564

3565 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:

3566 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:

3567 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:

3568 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:

3569 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:

3570 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:

3571 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:

3572 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:

3573

3574 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:

3575 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:

3576 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:

3577 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:

3578 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:

3579 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:

3580 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:

3581 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:

3582 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:

3583 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:

3584 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:

3585 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:

3586 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:

3587 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:

3588 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:

3589 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:

3590 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:

3591 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:

3592 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:

3593 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:

3594 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:

3595 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16: {

3597 Info.memVT = MVT::i32;

3598 Info.ptrVal = I.getArgOperand(0);

3599 Info.offset = 0;

3602 return true;

3603 }

3604

3605 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:

3606 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:

3607 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:

3608 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:

3609 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:

3610 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:

3611 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:

3612 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:

3613 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:

3614 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:

3615 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:

3616 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {

3618 Info.memVT = MVT::v4f16;

3619 Info.ptrVal = I.getArgOperand(0);

3620 Info.offset = 0;

3623 return true;

3624 }

3625

3626 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:

3627 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:

3628 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:

3629 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:

3630 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:

3631 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:

3632 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:

3633 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:

3634 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:

3635 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:

3636 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:

3637 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:

3638 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:

3639 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:

3640 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:

3641 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {

3643 Info.memVT = MVT::v8f32;

3644 Info.ptrVal = I.getArgOperand(0);

3645 Info.offset = 0;

3648 return true;

3649 }

3650

3651 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:

3652 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:

3653 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:

3654 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:

3655

3656 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:

3657 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:

3658 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:

3659 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:

3660

3661 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:

3662 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:

3663 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:

3664 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:

3665 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:

3666 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:

3667 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:

3668 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:

3669 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:

3670 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:

3671 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:

3672 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {

3674 Info.memVT = MVT::v8i32;

3675 Info.ptrVal = I.getArgOperand(0);

3676 Info.offset = 0;

3679 return true;

3680 }

3681

3682 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:

3683 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:

3684 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:

3685 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:

3686 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:

3687 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:

3688 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:

3689 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:

3690 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:

3691 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16: {

3693 Info.memVT = MVT::v2i32;

3694 Info.ptrVal = I.getArgOperand(0);

3695 Info.offset = 0;

3698 return true;

3699 }

3700

3701 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:

3702 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:

3703 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:

3704 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:

3705

3706 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:

3707 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:

3708 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:

3709 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {

3711 Info.memVT = MVT::f64;

3712 Info.ptrVal = I.getArgOperand(0);

3713 Info.offset = 0;

3716 return true;

3717 }

3718

3719 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:

3720 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:

3721 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:

3722 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {

3724 Info.memVT = MVT::v2f64;

3725 Info.ptrVal = I.getArgOperand(0);

3726 Info.offset = 0;

3729 return true;

3730 }

3731

3732 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:

3733 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:

3734 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:

3735 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:

3736 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:

3737 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:

3738 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:

3739 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:

3740 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:

3741 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:

3742 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:

3743 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {

3745 Info.memVT = MVT::v4f16;

3746 Info.ptrVal = I.getArgOperand(0);

3747 Info.offset = 0;

3750 return true;

3751 }

3752

3753 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:

3754 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:

3755 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:

3756 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:

3757 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:

3758 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:

3759 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:

3760 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:

3761 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:

3762 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:

3763 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:

3764 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:

3765 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:

3766 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:

3767 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:

3768 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {

3770 Info.memVT = MVT::v8f32;

3771 Info.ptrVal = I.getArgOperand(0);

3772 Info.offset = 0;

3775 return true;

3776 }

3777

3778 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:

3779 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:

3780 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:

3781 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:

3782 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:

3783 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:

3784 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:

3785 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:

3786 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:

3787 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:

3788 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:

3789 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {

3791 Info.memVT = MVT::v8i32;

3792 Info.ptrVal = I.getArgOperand(0);

3793 Info.offset = 0;

3796 return true;

3797 }

3798

3799 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:

3800 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:

3801 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:

3802 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:

3803 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:

3804 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:

3805 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:

3806 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride: {

3808 Info.memVT = MVT::v2i32;

3809 Info.ptrVal = I.getArgOperand(0);

3810 Info.offset = 0;

3813 return true;

3814 }

3815

3816 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:

3817 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:

3818 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:

3819 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {

3821 Info.memVT = MVT::v2f64;

3822 Info.ptrVal = I.getArgOperand(0);

3823 Info.offset = 0;

3826 return true;

3827 }

3828

3829 case Intrinsic::nvvm_atomic_load_inc_32:

3830 case Intrinsic::nvvm_atomic_load_dec_32:

3831

3832 case Intrinsic::nvvm_atomic_add_gen_f_cta:

3833 case Intrinsic::nvvm_atomic_add_gen_f_sys:

3834 case Intrinsic::nvvm_atomic_add_gen_i_cta:

3835 case Intrinsic::nvvm_atomic_add_gen_i_sys:

3836 case Intrinsic::nvvm_atomic_and_gen_i_cta:

3837 case Intrinsic::nvvm_atomic_and_gen_i_sys:

3838 case Intrinsic::nvvm_atomic_cas_gen_i_cta:

3839 case Intrinsic::nvvm_atomic_cas_gen_i_sys:

3840 case Intrinsic::nvvm_atomic_dec_gen_i_cta:

3841 case Intrinsic::nvvm_atomic_dec_gen_i_sys:

3842 case Intrinsic::nvvm_atomic_inc_gen_i_cta:

3843 case Intrinsic::nvvm_atomic_inc_gen_i_sys:

3844 case Intrinsic::nvvm_atomic_max_gen_i_cta:

3845 case Intrinsic::nvvm_atomic_max_gen_i_sys:

3846 case Intrinsic::nvvm_atomic_min_gen_i_cta:

3847 case Intrinsic::nvvm_atomic_min_gen_i_sys:

3848 case Intrinsic::nvvm_atomic_or_gen_i_cta:

3849 case Intrinsic::nvvm_atomic_or_gen_i_sys:

3850 case Intrinsic::nvvm_atomic_exch_gen_i_cta:

3851 case Intrinsic::nvvm_atomic_exch_gen_i_sys:

3852 case Intrinsic::nvvm_atomic_xor_gen_i_cta:

3853 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {

3854 auto &DL = I.getDataLayout();

3857 Info.ptrVal = I.getArgOperand(0);

3858 Info.offset = 0;

3860 Info.align.reset();

3861 return true;

3862 }

3863

3864 case Intrinsic::nvvm_ldu_global_i:

3865 case Intrinsic::nvvm_ldu_global_f:

3866 case Intrinsic::nvvm_ldu_global_p: {

3867 auto &DL = I.getDataLayout();

3869 if (Intrinsic == Intrinsic::nvvm_ldu_global_i)

3871 else if(Intrinsic == Intrinsic::nvvm_ldu_global_p)

3873 else

3875 Info.ptrVal = I.getArgOperand(0);

3876 Info.offset = 0;

3878 Info.align = cast(I.getArgOperand(1))->getMaybeAlignValue();

3879

3880 return true;

3881 }

3882 case Intrinsic::nvvm_tex_1d_v4f32_s32:

3883 case Intrinsic::nvvm_tex_1d_v4f32_f32:

3884 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:

3885 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:

3886 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:

3887 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:

3888 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:

3889 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:

3890 case Intrinsic::nvvm_tex_2d_v4f32_s32:

3891 case Intrinsic::nvvm_tex_2d_v4f32_f32:

3892 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:

3893 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:

3894 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:

3895 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:

3896 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:

3897 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:

3898 case Intrinsic::nvvm_tex_3d_v4f32_s32:

3899 case Intrinsic::nvvm_tex_3d_v4f32_f32:

3900 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:

3901 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:

3902 case Intrinsic::nvvm_tex_cube_v4f32_f32:

3903 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:

3904 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:

3905 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:

3906 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:

3907 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:

3908 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:

3909 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:

3910 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:

3911 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:

3912 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:

3913 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:

3914 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:

3915 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:

3916 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:

3917 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:

3918 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:

3919 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:

3920 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:

3921 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:

3922 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:

3923 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:

3924 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:

3925 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:

3926 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:

3927 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:

3928 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:

3929 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:

3930 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:

3931 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:

3932 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:

3933 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:

3934 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:

3935 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:

3936 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:

3937 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:

3938 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:

3939 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:

3941 Info.memVT = MVT::v4f32;

3942 Info.ptrVal = nullptr;

3943 Info.offset = 0;

3946 return true;

3947

3948 case Intrinsic::nvvm_tex_1d_v4s32_s32:

3949 case Intrinsic::nvvm_tex_1d_v4s32_f32:

3950 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:

3951 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:

3952 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:

3953 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:

3954 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:

3955 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:

3956 case Intrinsic::nvvm_tex_2d_v4s32_s32:

3957 case Intrinsic::nvvm_tex_2d_v4s32_f32:

3958 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:

3959 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:

3960 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:

3961 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:

3962 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:

3963 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:

3964 case Intrinsic::nvvm_tex_3d_v4s32_s32:

3965 case Intrinsic::nvvm_tex_3d_v4s32_f32:

3966 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:

3967 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:

3968 case Intrinsic::nvvm_tex_cube_v4s32_f32:

3969 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:

3970 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:

3971 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:

3972 case Intrinsic::nvvm_tex_cube_v4u32_f32:

3973 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:

3974 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:

3975 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:

3976 case Intrinsic::nvvm_tex_1d_v4u32_s32:

3977 case Intrinsic::nvvm_tex_1d_v4u32_f32:

3978 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:

3979 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:

3980 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:

3981 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:

3982 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:

3983 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:

3984 case Intrinsic::nvvm_tex_2d_v4u32_s32:

3985 case Intrinsic::nvvm_tex_2d_v4u32_f32:

3986 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:

3987 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:

3988 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:

3989 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:

3990 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:

3991 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:

3992 case Intrinsic::nvvm_tex_3d_v4u32_s32:

3993 case Intrinsic::nvvm_tex_3d_v4u32_f32:

3994 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:

3995 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:

3996 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:

3997 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:

3998 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:

3999 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:

4000 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:

4001 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:

4002 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:

4003 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:

4004 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:

4005 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:

4006 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:

4007 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:

4008 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:

4009 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:

4010 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:

4011 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:

4012 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:

4013 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:

4014 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:

4015 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:

4016 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:

4017 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:

4018 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:

4019 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:

4020 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:

4021 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:

4022 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:

4023 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:

4024 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:

4025 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:

4026 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:

4027 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:

4028 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:

4029 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:

4030 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:

4031 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:

4032 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:

4033 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:

4034 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:

4035 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:

4036 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:

4037 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:

4038 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:

4039 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:

4040 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:

4041 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:

4042 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:

4043 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:

4044 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:

4045 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:

4046 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:

4047 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:

4048 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:

4049 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:

4050 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:

4051 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:

4052 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:

4053 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:

4054 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:

4055 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:

4056 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:

4057 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:

4058 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:

4059 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:

4060 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:

4061 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:

4062 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:

4063 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:

4065 Info.memVT = MVT::v4i32;

4066 Info.ptrVal = nullptr;

4067 Info.offset = 0;

4070 return true;

4071

4072 case Intrinsic::nvvm_suld_1d_i8_clamp:

4073 case Intrinsic::nvvm_suld_1d_v2i8_clamp:

4074 case Intrinsic::nvvm_suld_1d_v4i8_clamp:

4075 case Intrinsic::nvvm_suld_1d_array_i8_clamp:

4076 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:

4077 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:

4078 case Intrinsic::nvvm_suld_2d_i8_clamp:

4079 case Intrinsic::nvvm_suld_2d_v2i8_clamp:

4080 case Intrinsic::nvvm_suld_2d_v4i8_clamp:

4081 case Intrinsic::nvvm_suld_2d_array_i8_clamp:

4082 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:

4083 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:

4084 case Intrinsic::nvvm_suld_3d_i8_clamp:

4085 case Intrinsic::nvvm_suld_3d_v2i8_clamp:

4086 case Intrinsic::nvvm_suld_3d_v4i8_clamp:

4087 case Intrinsic::nvvm_suld_1d_i8_trap:

4088 case Intrinsic::nvvm_suld_1d_v2i8_trap:

4089 case Intrinsic::nvvm_suld_1d_v4i8_trap:

4090 case Intrinsic::nvvm_suld_1d_array_i8_trap:

4091 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:

4092 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:

4093 case Intrinsic::nvvm_suld_2d_i8_trap:

4094 case Intrinsic::nvvm_suld_2d_v2i8_trap:

4095 case Intrinsic::nvvm_suld_2d_v4i8_trap:

4096 case Intrinsic::nvvm_suld_2d_array_i8_trap:

4097 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:

4098 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:

4099 case Intrinsic::nvvm_suld_3d_i8_trap:

4100 case Intrinsic::nvvm_suld_3d_v2i8_trap:

4101 case Intrinsic::nvvm_suld_3d_v4i8_trap:

4102 case Intrinsic::nvvm_suld_1d_i8_zero:

4103 case Intrinsic::nvvm_suld_1d_v2i8_zero:

4104 case Intrinsic::nvvm_suld_1d_v4i8_zero:

4105 case Intrinsic::nvvm_suld_1d_array_i8_zero:

4106 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:

4107 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:

4108 case Intrinsic::nvvm_suld_2d_i8_zero:

4109 case Intrinsic::nvvm_suld_2d_v2i8_zero:

4110 case Intrinsic::nvvm_suld_2d_v4i8_zero:

4111 case Intrinsic::nvvm_suld_2d_array_i8_zero:

4112 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:

4113 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:

4114 case Intrinsic::nvvm_suld_3d_i8_zero:

4115 case Intrinsic::nvvm_suld_3d_v2i8_zero:

4116 case Intrinsic::nvvm_suld_3d_v4i8_zero:

4118 Info.memVT = MVT::i8;

4119 Info.ptrVal = nullptr;

4120 Info.offset = 0;

4123 return true;

4124

4125 case Intrinsic::nvvm_suld_1d_i16_clamp:

4126 case Intrinsic::nvvm_suld_1d_v2i16_clamp:

4127 case Intrinsic::nvvm_suld_1d_v4i16_clamp:

4128 case Intrinsic::nvvm_suld_1d_array_i16_clamp:

4129 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:

4130 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:

4131 case Intrinsic::nvvm_suld_2d_i16_clamp:

4132 case Intrinsic::nvvm_suld_2d_v2i16_clamp:

4133 case Intrinsic::nvvm_suld_2d_v4i16_clamp:

4134 case Intrinsic::nvvm_suld_2d_array_i16_clamp:

4135 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:

4136 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:

4137 case Intrinsic::nvvm_suld_3d_i16_clamp:

4138 case Intrinsic::nvvm_suld_3d_v2i16_clamp:

4139 case Intrinsic::nvvm_suld_3d_v4i16_clamp:

4140 case Intrinsic::nvvm_suld_1d_i16_trap:

4141 case Intrinsic::nvvm_suld_1d_v2i16_trap:

4142 case Intrinsic::nvvm_suld_1d_v4i16_trap:

4143 case Intrinsic::nvvm_suld_1d_array_i16_trap:

4144 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:

4145 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:

4146 case Intrinsic::nvvm_suld_2d_i16_trap:

4147 case Intrinsic::nvvm_suld_2d_v2i16_trap:

4148 case Intrinsic::nvvm_suld_2d_v4i16_trap:

4149 case Intrinsic::nvvm_suld_2d_array_i16_trap:

4150 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:

4151 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:

4152 case Intrinsic::nvvm_suld_3d_i16_trap:

4153 case Intrinsic::nvvm_suld_3d_v2i16_trap:

4154 case Intrinsic::nvvm_suld_3d_v4i16_trap:

4155 case Intrinsic::nvvm_suld_1d_i16_zero:

4156 case Intrinsic::nvvm_suld_1d_v2i16_zero:

4157 case Intrinsic::nvvm_suld_1d_v4i16_zero:

4158 case Intrinsic::nvvm_suld_1d_array_i16_zero:

4159 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:

4160 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:

4161 case Intrinsic::nvvm_suld_2d_i16_zero:

4162 case Intrinsic::nvvm_suld_2d_v2i16_zero:

4163 case Intrinsic::nvvm_suld_2d_v4i16_zero:

4164 case Intrinsic::nvvm_suld_2d_array_i16_zero:

4165 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:

4166 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:

4167 case Intrinsic::nvvm_suld_3d_i16_zero:

4168 case Intrinsic::nvvm_suld_3d_v2i16_zero:

4169 case Intrinsic::nvvm_suld_3d_v4i16_zero:

4171 Info.memVT = MVT::i16;

4172 Info.ptrVal = nullptr;

4173 Info.offset = 0;

4176 return true;

4177

4178 case Intrinsic::nvvm_suld_1d_i32_clamp:

4179 case Intrinsic::nvvm_suld_1d_v2i32_clamp:

4180 case Intrinsic::nvvm_suld_1d_v4i32_clamp:

4181 case Intrinsic::nvvm_suld_1d_array_i32_clamp:

4182 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:

4183 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:

4184 case Intrinsic::nvvm_suld_2d_i32_clamp:

4185 case Intrinsic::nvvm_suld_2d_v2i32_clamp:

4186 case Intrinsic::nvvm_suld_2d_v4i32_clamp:

4187 case Intrinsic::nvvm_suld_2d_array_i32_clamp:

4188 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:

4189 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:

4190 case Intrinsic::nvvm_suld_3d_i32_clamp:

4191 case Intrinsic::nvvm_suld_3d_v2i32_clamp:

4192 case Intrinsic::nvvm_suld_3d_v4i32_clamp:

4193 case Intrinsic::nvvm_suld_1d_i32_trap:

4194 case Intrinsic::nvvm_suld_1d_v2i32_trap:

4195 case Intrinsic::nvvm_suld_1d_v4i32_trap:

4196 case Intrinsic::nvvm_suld_1d_array_i32_trap:

4197 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:

4198 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:

4199 case Intrinsic::nvvm_suld_2d_i32_trap:

4200 case Intrinsic::nvvm_suld_2d_v2i32_trap:

4201 case Intrinsic::nvvm_suld_2d_v4i32_trap:

4202 case Intrinsic::nvvm_suld_2d_array_i32_trap:

4203 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:

4204 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:

4205 case Intrinsic::nvvm_suld_3d_i32_trap:

4206 case Intrinsic::nvvm_suld_3d_v2i32_trap:

4207 case Intrinsic::nvvm_suld_3d_v4i32_trap:

4208 case Intrinsic::nvvm_suld_1d_i32_zero:

4209 case Intrinsic::nvvm_suld_1d_v2i32_zero:

4210 case Intrinsic::nvvm_suld_1d_v4i32_zero:

4211 case Intrinsic::nvvm_suld_1d_array_i32_zero:

4212 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:

4213 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:

4214 case Intrinsic::nvvm_suld_2d_i32_zero:

4215 case Intrinsic::nvvm_suld_2d_v2i32_zero:

4216 case Intrinsic::nvvm_suld_2d_v4i32_zero:

4217 case Intrinsic::nvvm_suld_2d_array_i32_zero:

4218 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:

4219 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:

4220 case Intrinsic::nvvm_suld_3d_i32_zero:

4221 case Intrinsic::nvvm_suld_3d_v2i32_zero:

4222 case Intrinsic::nvvm_suld_3d_v4i32_zero:

4224 Info.memVT = MVT::i32;

4225 Info.ptrVal = nullptr;

4226 Info.offset = 0;

4229 return true;

4230

4231 case Intrinsic::nvvm_suld_1d_i64_clamp:

4232 case Intrinsic::nvvm_suld_1d_v2i64_clamp:

4233 case Intrinsic::nvvm_suld_1d_array_i64_clamp:

4234 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:

4235 case Intrinsic::nvvm_suld_2d_i64_clamp:

4236 case Intrinsic::nvvm_suld_2d_v2i64_clamp:

4237 case Intrinsic::nvvm_suld_2d_array_i64_clamp:

4238 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:

4239 case Intrinsic::nvvm_suld_3d_i64_clamp:

4240 case Intrinsic::nvvm_suld_3d_v2i64_clamp:

4241 case Intrinsic::nvvm_suld_1d_i64_trap:

4242 case Intrinsic::nvvm_suld_1d_v2i64_trap:

4243 case Intrinsic::nvvm_suld_1d_array_i64_trap:

4244 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:

4245 case Intrinsic::nvvm_suld_2d_i64_trap:

4246 case Intrinsic::nvvm_suld_2d_v2i64_trap:

4247 case Intrinsic::nvvm_suld_2d_array_i64_trap:

4248 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:

4249 case Intrinsic::nvvm_suld_3d_i64_trap:

4250 case Intrinsic::nvvm_suld_3d_v2i64_trap:

4251 case Intrinsic::nvvm_suld_1d_i64_zero:

4252 case Intrinsic::nvvm_suld_1d_v2i64_zero:

4253 case Intrinsic::nvvm_suld_1d_array_i64_zero:

4254 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:

4255 case Intrinsic::nvvm_suld_2d_i64_zero:

4256 case Intrinsic::nvvm_suld_2d_v2i64_zero:

4257 case Intrinsic::nvvm_suld_2d_array_i64_zero:

4258 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:

4259 case Intrinsic::nvvm_suld_3d_i64_zero:

4260 case Intrinsic::nvvm_suld_3d_v2i64_zero:

4262 Info.memVT = MVT::i64;

4263 Info.ptrVal = nullptr;

4264 Info.offset = 0;

4267 return true;

4268 }

4269 return false;

4270}

4271

4272

4273

4274

4275

4276

4277

4278

4281

4282

4283 const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));

4284

4285

4286

4287

4288 if (F || F->hasLocalLinkage() ||

4289 F->hasAddressTaken(nullptr,

4290 false,

4291 true,

4292 true))

4293 return ABITypeAlign;

4294

4296 return std::max(Align(16), ABITypeAlign);

4297}

4298

4299

4303 Align ArgAlign = InitialAlign;

4304

4305 if (F)

4307

4308

4309

4310

4311

4312

4313

4314

4315

4316

4318 ArgAlign = std::max(ArgAlign, Align(4));

4319

4320 return ArgAlign;

4321}

4322

4323

4324

4325

4327 int Idx) const {

4328 std::string ParamName;

4330

4332 if (Idx < 0)

4333 ParamStr << "_vararg";

4334 else

4335 ParamStr << "_param_" << Idx;

4336

4337 return ParamName;

4338}

4339

4340

4341

4342

4343

4344

4348

4349

4350

4351

4352

4353

4354

4355

4356

4357

4359 return false;

4360

4363

4364 switch (AM.Scale) {

4365 case 0:

4366 break;

4367 case 1:

4368 if (AM.HasBaseReg)

4369 return false;

4370

4371 break;

4372 default:

4373

4374 return false;

4375 }

4376 return true;

4377}

4378

4379

4380

4381

4382

4383

4384

4387 if (Constraint.size() == 1) {

4388 switch (Constraint[0]) {

4389 default:

4390 break;

4391 case 'b':

4392 case 'r':

4393 case 'h':

4394 case 'c':

4395 case 'l':

4396 case 'f':

4397 case 'd':

4398 case 'q':

4399 case '0':

4400 case 'N':

4402 }

4403 }

4405}

4406

4407std::pair<unsigned, const TargetRegisterClass *>

4410 MVT VT) const {

4411 if (Constraint.size() == 1) {

4412 switch (Constraint[0]) {

4413 case 'b':

4414 return std::make_pair(0U, &NVPTX::Int1RegsRegClass);

4415 case 'c':

4416 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);

4417 case 'h':

4418 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);

4419 case 'r':

4420 return std::make_pair(0U, &NVPTX::Int32RegsRegClass);

4421 case 'l':

4422 case 'N':

4423 return std::make_pair(0U, &NVPTX::Int64RegsRegClass);

4424 case 'q': {

4427 "supported for sm_70 and higher!");

4428 return std::make_pair(0U, &NVPTX::Int128RegsRegClass);

4429 }

4430 case 'f':

4431 return std::make_pair(0U, &NVPTX::Float32RegsRegClass);

4432 case 'd':

4433 return std::make_pair(0U, &NVPTX::Float64RegsRegClass);

4434 }

4435 }

4437}

4438

4439

4440

4441

4442

4445

4448

4449

4451 return false;

4452

4453

4455 return true;

4456

4458}

4459

4461

4463 return true;

4464

4465

4467 return F.getFnAttribute("unsafe-fp-math").getValueAsBool();

4468}

4469

4471 const auto *Const = dyn_cast(Operand);

4472 return Const && Const->getZExtValue() == 0;

4473}

4474

4475

4476

4477

4478

4483

4484

4485

4486

4487

4490

4491

4492

4493

4495 unsigned ZeroOpNum;

4497 ZeroOpNum = 1;

4499 ZeroOpNum = 2;

4500 else

4502

4504 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())

4506

4512 ((ZeroOpNum == 1) ? N1 : MAD),

4513 ((ZeroOpNum == 1) ? MAD : N1));

4514 }

4515

4517}

4518

4529

4530

4531

4532

4533

4534

4535

4536

4537

4538 int numUses = 0;

4539 int nonAddCount = 0;

4541 numUses++;

4543 ++nonAddCount;

4544 if (numUses >= 5)

4546 }

4547 if (nonAddCount) {

4548 int orderNo = N->getIROrder();

4550

4551

4552

4553

4554 if (orderNo - orderNo2 < 500)

4556

4557

4558

4559

4560 bool opIsLive = false;

4563

4564 if (isa(left) || isa(right))

4565 opIsLive = true;

4566

4567 if (!opIsLive)

4569 int orderNo3 = User->getIROrder();

4570 if (orderNo3 > orderNo) {

4571 opIsLive = true;

4572 break;

4573 }

4574 }

4575

4576 if (!opIsLive)

4578 int orderNo3 = User->getIROrder();

4579 if (orderNo3 > orderNo) {

4580 opIsLive = true;

4581 break;

4582 }

4583 }

4584

4585 if (!opIsLive)

4587 }

4588

4591 }

4592

4594}

4595

4597 std::size_t Back) {

4598 if (all_of(N->ops().drop_front(Front).drop_back(Back),

4599 [](const SDUse &U) { return U.get()->isUndef(); }))

4600

4601

4602 return N->getOperand(0);

4603

4605}

4606

4608

4609

4611}

4612

4614

4616}

4617

4618

4619

4625

4626 SDValue N0 = N->getOperand(0);

4627 SDValue N1 = N->getOperand(1);

4628

4629

4631 if (VT.isVector() || VT != MVT::i32)

4633

4634

4636 return Result;

4637

4638

4640}

4641

4642

4643

4647 SDValue N0 = N->getOperand(0);

4648 SDValue N1 = N->getOperand(1);

4649

4651 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))

4653

4654

4656 return Result;

4657

4658

4660}

4661

4664

4665

4666

4667

4668

4669 SDValue Val = N->getOperand(0);

4670 SDValue Mask = N->getOperand(1);

4671

4672 if (isa(Val)) {

4674 }

4675

4677

4678

4679

4680

4685

4686 ConstantSDNode *BFEBits = dyn_cast(BFE.getOperand(0));

4687 if (!BFEBits)

4690

4691 ConstantSDNode *MaskCnst = dyn_cast(Mask);

4692 if (!MaskCnst) {

4693

4695 }

4697

4698 if (MaskVal != (uint64_t(1) << BFEBitsVal) - 1)

4700

4702 }

4703

4705 AExt = Val;

4707 }

4708

4711 ConstantSDNode *MaskCnst = dyn_cast(Mask);

4712 if (!MaskCnst) {

4713

4715 }

4716

4718 if (MaskVal != 0xff) {

4719

4721 }

4722

4723 MemSDNode *Mem = dyn_cast(Val);

4724 if (!Mem) {

4725

4727 }

4728

4730 if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {

4731

4733 }

4734

4737

4738

4740 }

4741

4742 bool AddTo = false;

4743 if (AExt.getNode() != nullptr) {

4744

4747 AddTo = true;

4748 }

4749

4750

4752 }

4753

4755}

4756

4761

4762

4765

4768 EVT VT = N->getValueType(0);

4769 bool IsSigned = N->getOpcode() == ISD::SREM;

4771

4772 const SDValue &Num = N->getOperand(0);

4773 const SDValue &Den = N->getOperand(1);

4774

4776 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&

4777 U->getOperand(1) == Den) {

4778

4781 DAG.getNode(DivOpc, DL, VT, Num, Den),

4782 Den));

4783 }

4784 }

4786}

4787

4793

4794

4795

4796

4798 unsigned OptSize,

4801

4804 EVT OrigVT = Op.getOperand(0).getValueType();

4807 return true;

4808 }

4810 EVT OrigVT = Op.getOperand(0).getValueType();

4813 return true;

4814 }

4815 }

4816

4817 return false;

4818}

4819

4820

4821

4822

4823

4825 unsigned OptSize,

4826 bool &IsSigned) {

4828

4829

4831 return false;

4832

4833

4835 return false;

4836

4837 IsSigned = (LHSSign == Signed);

4838

4839

4841 const APInt &Val = CI->getAPIntValue();

4843 return Val.isIntN(OptSize);

4844 } else {

4846 }

4847 } else {

4850 return false;

4851

4852 return LHSSign == RHSSign;

4853 }

4854}

4855

4856

4857

4858

4859

4862 EVT MulType = N->getValueType(0);

4863 if (MulType != MVT::i32 && MulType != MVT::i64) {

4865 }

4866

4868 unsigned OptSize = MulType.getSizeInBits() >> 1;

4871

4872

4873 if (N->getOpcode() == ISD::MUL) {

4874 if (isa(LHS)) {

4876 }

4877 }

4878

4879

4880 if (N->getOpcode() == ISD::SHL) {

4882 if (!ShlRHS) {

4884 }

4885

4891 } else {

4893 }

4894 }

4895

4897

4900 }

4901

4902 EVT DemotedVT;

4903 if (MulType == MVT::i32) {

4904 DemotedVT = MVT::i16;

4905 } else {

4906 DemotedVT = MVT::i32;

4907 }

4908

4909

4910

4915

4916 unsigned Opc;

4919 } else {

4921 }

4922

4923 return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);

4924}

4925

4927 const auto *Const = dyn_cast(Operand);

4928 return Const && Const->getZExtValue() == 1;

4929}

4930

4934

4936 return Add->getOperand(1);

4937

4939 return Add->getOperand(0);

4940

4942}

4943

4946

4950 }

4951

4953}

4954

4960

4962

4963 unsigned ConstOpNo;

4965 ConstOpNo = 1;

4967 ConstOpNo = 2;

4968 else

4970

4971 SDValue Y = Select->getOperand((ConstOpNo == 1) ? 2 : 1);

4972

4973

4976

4978

4980 (ConstOpNo == 1) ? X : NewMul,

4981 (ConstOpNo == 1) ? NewMul : X);

4982}

4983

4987

4991

4992 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)

4994

4996

4997

4999 return Res;

5001 return Res;

5002

5003

5005 return Res;

5007 return Res;

5008

5010}

5011

5012

5018

5020 return Ret;

5021

5022 SDValue N0 = N->getOperand(0);

5023 SDValue N1 = N->getOperand(1);

5025}

5026

5027

5032

5034 return Ret;

5035 }

5036

5038}

5039

5043 EVT CCType = N->getValueType(0);

5046

5047 EVT AType = A.getValueType();

5048 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))

5050

5051 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)

5053

5055

5056

5057

5058

5062 DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});

5065}

5066

5073 EVT VectorVT = Vector.getValueType();

5076 return SDValue();

5077

5078

5079

5081 VectorVT == MVT::v4i8 || VectorVT == MVT::v8i8)

5083

5084

5087

5089

5090 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))

5092

5093 ConstantSDNode *Index = dyn_cast(N->getOperand(1));

5094

5095 if (!Index || Index->getZExtValue() == 0)

5097

5102

5107 DCI.DAG.getConstant(Index->getZExtValue() * EltBits, DL, IVT)));

5108

5109

5110 if (EltVT != EltIVT)

5112

5113 if (EltVT != N->getValueType(0))

5115

5116 return Result;

5117}

5118

5121 SDValue VA = N->getOperand(1);

5123 if (VectorVT != MVT::v4i8)

5125

5126

5127

5128

5129

5132 SDValue VCond = N->getOperand(0);

5133 SDValue VB = N->getOperand(2);

5134 for (int I = 0; I < 4; ++I) {

5140 DL, MVT::i32);

5144 DL, MVT::i32);

5147 }

5149}

5150

5153 auto VT = N->getValueType(0);

5156

5157 auto Op0 = N->getOperand(0);

5158 auto Op1 = N->getOperand(1);

5159

5160

5161

5164

5165 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},

5166 {&Op1, &Op1Bytes}};

5167

5168

5169

5170

5171 for (auto &[Op, OpBytes] : OpData) {

5172

5174 *Op = Op->getOperand(0);

5175

5176 if (!(Op->getValueType() == MVT::i16 && Op->getOpcode() == ISD::TRUNCATE &&

5177 Op->getOperand(0).getValueType() == MVT::i32))

5179

5180

5181

5182 if (Op->hasOneUse())

5184

5185 *Op = Op->getOperand(0);

5186

5187

5188

5189 if (Op->getOpcode() == ISD::SRL && isa(Op->getOperand(1))) {

5190 if (cast(Op->getOperand(1))->getZExtValue() == 16) {

5191

5192

5193 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&

5194 "PRMT selector values out of range");

5195 *OpBytes += 0x22;

5196 *Op = Op->getOperand(0);

5197 }

5198 }

5199 }

5200

5202 auto &DAG = DCI.DAG;

5203

5204 auto PRMT = DAG.getNode(

5206 {Op0, Op1, DAG.getConstant((Op1Bytes << 8) | Op0Bytes, DL, MVT::i32),

5209}

5210

5211SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,

5212 DAGCombinerInfo &DCI) const {

5214 switch (N->getOpcode()) {

5215 default: break;

5245 }

5247}

5248

5251

5252

5254 EVT ToVT = Op->getValueType(0);

5255 if (ToVT != MVT::v2i8) {

5256 return;

5257 }

5258

5259

5269}

5270

5271

5274 EVT ResVT = N->getValueType(0);

5276

5277 assert(ResVT.isVector() && "Vector load must have vector type");

5278

5280 if (!NumEltsAndEltVT)

5281 return;

5282 auto [NumElts, EltVT] = NumEltsAndEltVT.value();

5283

5285

5286 Align Alignment = LD->getAlign();

5288 Align PrefAlign =

5290 if (Alignment < PrefAlign) {

5291

5292

5293

5294

5295

5296 return;

5297 }

5298

5299

5300

5301

5302 bool NeedTrunc = false;

5304 EltVT = MVT::i16;

5305 NeedTrunc = true;

5306 }

5307

5308 unsigned Opcode = 0;

5310

5311 switch (NumElts) {

5312 default:

5313 return;

5314 case 2:

5316 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);

5317 break;

5318 case 4: {

5320 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };

5321 LdResVTs = DAG.getVTList(ListVTs);

5322 break;

5323 }

5324 }

5325

5326

5328

5329

5330

5332

5334 LD->getMemoryVT(),

5335 LD->getMemOperand());

5336

5339 "NumElts should not increase, only decrease or stay the same.");

5341

5342

5345

5346

5347 for (unsigned i = 0; i < NumElts; ++i) {

5350 }

5351 } else {

5352 for (unsigned i = 0; i < NumElts; ++i) {

5354 if (NeedTrunc)

5357 }

5358 }

5359

5361

5363

5364 Results.push_back(BuildVec);

5365 Results.push_back(LoadChain);

5366}

5367

5370 SDValue Chain = N->getOperand(0);

5371 SDValue Intrin = N->getOperand(1);

5373

5374

5376 switch (IntrinNo) {

5377 default:

5378 return;

5379 case Intrinsic::nvvm_ldu_global_i:

5380 case Intrinsic::nvvm_ldu_global_f:

5381 case Intrinsic::nvvm_ldu_global_p: {

5382 EVT ResVT = N->getValueType(0);

5383

5385

5386

5389

5390

5391

5392

5393

5394 bool NeedTrunc = false;

5396 EltVT = MVT::i16;

5397 NeedTrunc = true;

5398 }

5399

5400 unsigned Opcode = 0;

5402

5403 switch (NumElts) {

5404 default:

5405 return;

5406 case 2:

5408 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);

5409 break;

5410 case 4: {

5412 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };

5413 LdResVTs = DAG.getVTList(ListVTs);

5414 break;

5415 }

5416 }

5417

5419

5420

5421

5422 OtherOps.push_back(Chain);

5423

5424

5425 OtherOps.append(N->op_begin() + 2, N->op_end());

5426

5428

5432

5434

5435 for (unsigned i = 0; i < NumElts; ++i) {

5437 if (NeedTrunc)

5438 Res =

5441 }

5442

5444

5447

5448 Results.push_back(BuildVec);

5449 Results.push_back(LoadChain);

5450 } else {

5451

5453 "Custom handling of non-i8 ldu/ldg?");

5454

5455

5457

5458

5460

5462

5463

5464

5468

5472 }

5473 }

5474 }

5475}

5476

5479

5480

5482 SDValue Chain = N->getOperand(0);

5483 SDValue Reg = N->getOperand(1);

5484 SDValue Glue = N->getOperand(2);

5485

5486 assert(Reg.getValueType() == MVT::i128 &&

5487 "Custom lowering for CopyFromReg with 128-bit reg only");

5489 N->getValueType(2)};

5491

5495

5496 Results.push_back(Pair);

5499}

5500

5501void NVPTXTargetLowering::ReplaceNodeResults(

5503 switch (N->getOpcode()) {

5504 default:

5508 return;

5511 return;

5514 return;

5517 return;

5518 }

5519}

5520

5524

5537 }

5539 }

5540

5541 assert(Ty->isIntegerTy() && "Ty should be integer at this point");

5542 auto ITy = castllvm::IntegerType(Ty);

5543

5545 default:

5551 switch (ITy->getBitWidth()) {

5552 case 8:

5553 case 16:

5555 case 32:

5557 case 64:

5561 default:

5563 }

5570 switch (ITy->getBitWidth()) {

5571 case 8:

5572 case 16:

5574 case 32:

5576 case 64:

5580 default:

5582 }

5583 }

5584

5586}

5587

5588

5590

5594}

AMDGPU Register Bank Select

This file implements a class to represent arbitrary precision integral constant values and operations...

static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.

static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.

static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)

static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

Function Alias Analysis Results

This file contains the simple types necessary to represent the attributes associated with functions a...

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

Analysis containing CSE Info

This file contains the declarations for the subclasses of Constant, which represent the different fla...

Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx

static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")

This file contains the declarations of entities that describe floating point environment and related ...

Module.h This file contains the declarations for the Module class.

static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)

Return the first found DebugLoc that has a DILocation, given a range of instructions.

unsigned const TargetRegisterInfo * TRI

NVPTX address space definition.

static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)

static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))

static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)

static bool isConstOne(const SDValue &Operand)

static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))

static bool IsPTXVectorType(MVT VT)

static cl::opt< int > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use" " IEEE Compliant F32 div.rnd if available."), cl::init(2))

static SDValue PerformStoreParamCombine(SDNode *N)

static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)

ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.

static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)

static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)

static bool Is16bitsType(MVT VT)

static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)

static bool IsTypePassedAsArray(const Type *Ty)

static SmallVector< ParamVectorizationFlags, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< uint64_t > &Offsets, Align ParamAlignment, bool IsVAArg=false)

static unsigned CanMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< uint64_t > &Offsets, Align ParamAlignment)

static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)

static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)

static bool isConstZero(const SDValue &Operand)

static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)

static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)

ComputePTXValueVTs - For the given Type Ty, returns the set of primitive EVTs that compose it.

static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)

IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...

static SDValue LowerUnalignedStoreParam(SelectionDAG &DAG, SDValue Chain, uint64_t Offset, EVT ElementType, SDValue StVal, SDValue &InGlue, unsigned ArgID, const SDLoc &dl)

static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)

static std::optional< std::pair< unsigned int, EVT > > getVectorLoweringShape(EVT VectorVT)

static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)

static SDValue PerformStoreRetvalCombine(SDNode *N)

static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)

AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...

static SDValue PerformStoreCombineHelper(SDNode *N, std::size_t Front, std::size_t Back)

static bool adjustElementType(EVT &ElementType)

static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)

TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...

static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)

static SDValue matchMADConstOnePattern(SDValue Add)

static SDValue MaybeBitcast(SelectionDAG &DAG, SDLoc DL, EVT VT, SDValue Value)

static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))

static SDValue LowerUnalignedStoreRet(SelectionDAG &DAG, SDValue Chain, uint64_t Offset, EVT ElementType, SDValue RetVal, const SDLoc &dl)

static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)

static bool PromoteScalarIntegerPTX(const EVT &VT, MVT *PromotedVT)

PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...

static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)

static SDValue LowerUnalignedLoadRetParam(SelectionDAG &DAG, SDValue &Chain, uint64_t Offset, EVT ElementType, SDValue &InGlue, SmallVectorImpl< SDValue > &TempProxyRegOps, const SDLoc &dl)

static std::atomic< unsigned > GlobalUniqueCallSite

static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))

static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))

Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....

static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)

PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.

static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")

const SmallVectorImpl< MachineOperand > & Cond

assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())

This file defines the SmallVector class.

This file describes how to lower LLVM code to machine code.

Class for arbitrary precision integers.

bool isSignedIntN(unsigned N) const

Check if this APInt has an N-bits signed integer value.

bool slt(const APInt &RHS) const

Signed less than comparison.

bool isIntN(unsigned N) const

Check if this APInt has an N-bits unsigned integer value.

bool sge(const APInt &RHS) const

Signed greater or equal comparison.

This class represents an incoming formal argument to a Function.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

const T & back() const

back - Get the last element.

ArrayRef< T > drop_back(size_t N=1) const

Drop the last N elements of the array.

bool empty() const

empty - Check if the array is empty.

an instruction that atomically reads a memory location, combines it with another value,...

@ Min

*p = old <signed v ? old : v

@ Max

*p = old >signed v ? old : v

@ UMin

*p = old <unsigned v ? old : v

@ UMax

*p = old >unsigned v ? old : v

bool isFloatingPointOperation() const

BinOp getOperation() const

bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Return true if the attribute exists for the given argument.

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signa...

FunctionType * getFunctionType() const

This class represents a function call, abstracting a target machine's calling convention.

uint64_t getZExtValue() const

const APInt & getAPIntValue() const

static Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

This class represents an Operation in the Expression.

uint64_t getNumOperands() const

A parsed version of the target data layout string in and methods for querying it.

TypeSize getTypeAllocSize(Type *Ty) const

Returns the offset in bytes between successive objects of the specified type, including alignment pad...

Align getPrefTypeAlign(Type *Ty) const

Returns the preferred stack/global alignment for the specified type.

Diagnostic information for unsupported feature in backend.

void addFnAttr(Attribute::AttrKind Kind)

Add function attributes to this function.

Type * getReturnType() const

Returns the type of the ret val.

unsigned getAddressSpace() const

const GlobalValue * getGlobal() const

This is an important class for using LLVM in a threaded context.

void diagnose(const DiagnosticInfo &DI)

Report a message to the currently installed diagnostic handler.

This class is used to represent ISD::LOAD nodes.

MCSection * getDataSection() const

Instances of this class represent a uniqued identifier for a section in the current translation unit.

StringRef getName() const

getName - Get the symbol name.

unsigned getVectorNumElements() const

bool isScalableVector() const

Return true if this is a vector value type where the runtime length is machine dependent.

static auto integer_valuetypes()

static auto fixedlen_vector_valuetypes()

static MVT getVectorVT(MVT VT, unsigned NumElements)

static MVT getIntegerVT(unsigned BitWidth)

MVT getScalarType() const

If this is a vector, return the element type, otherwise return this.

DenormalMode getDenormalMode(const fltSemantics &FPType) const

Returns the denormal handling type for the default rounding mode of the function.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

Function & getFunction()

Return the LLVM function that this machine code represents.

const MachineJumpTableInfo * getJumpTableInfo() const

getJumpTableInfo - Return the jump table info object for the current function.

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with

@ EK_Inline

EK_Inline - Jump table entries are emitted inline at their point of use.

const std::vector< MachineJumpTableEntry > & getJumpTables() const

@ MODereferenceable

The memory access is dereferenceable (i.e., doesn't trap).

@ MOLoad

The memory access reads data.

@ MOInvariant

The memory access always returns the same value (or traps).

@ MOStore

The memory access writes data.

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")

createVirtualRegister - Create and return a new virtual register in the function with the specified r...

This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.

This is an abstract virtual class for memory operations.

MachineMemOperand * getMemOperand() const

Return a MachineMemOperand object describing the memory reference performed by operation.

EVT getMemoryVT() const

Return the type of the in-memory value.

unsigned getMaxRequiredAlignment() const

bool hasAtomMinMax64() const

bool hasAtomAddF64() const

const NVPTXTargetLowering * getTargetLowering() const override

unsigned getMinCmpXchgSizeInBits() const

unsigned getPTXVersion() const

bool hasNativeBF16Support(int Opcode) const

const NVPTXRegisterInfo * getRegisterInfo() const override

unsigned int getSmVersion() const

bool hasAtomBitwise64() const

bool allowFP16Math() const

ConstraintType getConstraintType(StringRef Constraint) const override

getConstraintType - Given a constraint letter, return the type of constraint it is for this target.

SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override

This callback is invoked for operations that are unsupported by the target, which are registered to u...

const NVPTXTargetMachine * nvTM

SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const

NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)

bool useF32FTZ(const MachineFunction &MF) const

SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const

Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const

SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override

Hooks for building estimates in place of slower divisions and square roots.

SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override

This hook must be implemented to lower outgoing return values, described by the Outs array,...

SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override

This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...

void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override

Lower the specified operand into the Ops vector.

SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const

std::string getParamName(const Function *F, int Idx) const

TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override

Return the preferred vector type legalization action.

std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, MaybeAlign retAlignment, std::optional< std::pair< unsigned, const APInt & > > VAInfo, const CallBase &CB, unsigned UniqueCallSite) const

Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const

getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...

SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const

EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override

Return the ValueType of the result of SETCC operations.

std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override

Given a physical register constraint (e.g.

bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override

isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...

AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override

Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.

Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const

Helper for computing alignment of a device function byval parameter.

bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override

Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...

const char * getTargetNodeName(unsigned Opcode) const override

This method returns the name of a target specific DAG node.

bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const

bool usePrecSqrtF32() const

unsigned getJumpTableEncoding() const override

Return the entry encoding for a jump table in the current function.

bool allowUnsafeFPMath(MachineFunction &MF) const

int getDivF32Level() const

SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override

This hook must be implemented to lower calls into the specified DAG.

UniqueStringSaver & getStrPool() const

MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override

~NVPTXTargetObjectFile() override

static PointerType * get(Type *ElementType, unsigned AddressSpace)

This constructs a pointer to an object of the specified type in a numbered address space.

Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...

Represents one node in the SelectionDAG.

const APInt & getAsAPIntVal() const

Helper method returns the APInt value of a ConstantSDNode.

unsigned getOpcode() const

Return the SelectionDAG opcode value for this node.

bool hasOneUse() const

Return true if there is exactly one use of this node.

unsigned getIROrder() const

Return the node ordering.

uint64_t getAsZExtVal() const

Helper method returns the zero-extended integer value of a ConstantSDNode.

unsigned getNumOperands() const

Return the number of values used by this operation.

SDVTList getVTList() const

const SDValue & getOperand(unsigned Num) const

uint64_t getConstantOperandVal(unsigned Num) const

Helper method returns the integer value of a ConstantSDNode operand.

const APInt & getConstantOperandAPInt(unsigned Num) const

Helper method returns the APInt of a ConstantSDNode operand.

EVT getValueType(unsigned ResNo) const

Return the type of a specified result.

bool isUndef() const

Return true if the type of the node type undefined.

iterator_range< user_iterator > users()

Represents a use of a SDNode.

Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.

SDNode * getNode() const

get the SDNode which holds the desired result

SDValue getValue(unsigned R) const

EVT getValueType() const

Return the ValueType of the referenced return value.

TypeSize getValueSizeInBits() const

Returns the size of the value in bits.

const SDValue & getOperand(unsigned i) const

MVT getSimpleValueType() const

Return the simple ValueType of the referenced return value.

unsigned getOpcode() const

SectionKind - This is a simple POD value that classifies the properties of a section.

This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...

SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)

const SDValue & getRoot() const

Return the root tag of the SelectionDAG.

SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)

Return an AddrSpaceCastSDNode.

SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)

SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)

Create a MERGE_VALUES node from the given operands.

SDVTList getVTList(EVT VT)

Return an SDVTList that represents the list of values specified.

void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())

Append the extracted elements from Start to Count out of the vector Op in Args.

SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)

Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...

SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)

Return a GlobalAddress of the function from the current module with name matching the given ExternalS...

SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)

Create a ConstantFPSDNode wrapping a constant value.

SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)

Loads are not normal binary operators: their result type is not determined by their operands,...

const TargetLowering & getTargetLoweringInfo() const

SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)

This mutates the specified node to have the specified return type, opcode, and operands.

SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)

Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).

SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)

Return an ISD::BUILD_VECTOR node.

SDValue getBitcast(EVT VT, SDValue V)

Return a bitcast using the SDLoc of the value operand, and casting to the provided type.

SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)

SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())

Helper function to make it easier to build Select's if you just have operands and don't want to check...

const DataLayout & getDataLayout() const

SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

Create a ConstantSDNode wrapping a constant value.

SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

void ReplaceAllUsesWith(SDValue From, SDValue To)

Modify anything using 'From' to use 'To' instead.

SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

Helper function to build ISD::STORE nodes.

SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)

Return a new CALLSEQ_START node that starts a new call frame, in which InSize bytes are set up inside ...

void RemoveDeadNode(SDNode *N)

Remove the specified node from the system.

SDValue getBasicBlock(MachineBasicBlock *MBB)

SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)

Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...

SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)

Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...

SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)

SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)

Gets or creates the specified node.

SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)

Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...

SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)

MachineFunction & getMachineFunction() const

SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)

Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...

LLVMContext * getContext() const

const SDValue & setRoot(SDValue N)

Set the current root tag of the SelectionDAG.

SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())

Creates a MemIntrinsicNode that may produce a result and takes a list of operands.

SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)

SDValue getEntryNode() const

Return the token chain corresponding to the entry of the function.
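
Together, these factory methods are how a custom lowering hook assembles replacement DAG nodes. A minimal sketch, assuming Op is a load being rewritten by a hypothetical lowering routine (lowerExample is illustrative, not this file's code):

static SDValue lowerExample(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  auto *LD = cast<LoadSDNode>(Op.getNode());
  // Re-issue the load, then post-process its value.
  SDValue NewLoad = DAG.getLoad(MVT::i32, DL, LD->getChain(),
                                LD->getBasePtr(), LD->getPointerInfo());
  SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i32, NewLoad,
                            DAG.getConstant(1, DL, MVT::i32));
  // A lowered load must return both the value and the new chain.
  return DAG.getMergeValues({Add, NewLoad.getValue(1)}, DL);
}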

This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...

ArrayRef< int > getMask() const

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void assign(size_type NumElts, ValueParamT Elt)

void append(ItTy in_start, ItTy in_end)

Add the specified range to the end of the SmallVector.

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

This class is used to represent ISD::STORE nodes.

StringRef - Represent a constant reference to a string, i.e.

constexpr size_t size() const

size - Get the string size.

constexpr const char * data() const

data - Get a pointer to the start of the string (which may not be null terminated).

Class to represent struct types.

void setBooleanVectorContents(BooleanContent Ty)

Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...

void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)

Indicate that the specified operation does not work with the specified type and indicate what to do a...

void setMaxDivRemBitWidthSupported(unsigned SizeInBits)

Set the size in bits of the maximum div/rem the backend supports.

EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const

Return the EVT corresponding to this LLVM type.

LegalizeAction

This enum indicates whether operations are valid for a target, and if not, what action should be used...

unsigned MaxStoresPerMemcpyOptSize

Likewise for functions with the OptSize attribute.

virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const

Return the register class that should be used for the specified value type.

const TargetMachine & getTargetMachine() const

void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)

Convenience method to set an operation to Promote and specify the type in a single call.

LegalizeTypeAction

This enum indicates whether types are legal for a target, and if not, what action should be used to...

void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)

Tells the code generator which bitwidths to bypass.

virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const

Return the number of registers that this ValueType will eventually require.

void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)

Set the maximum atomic operation size supported by the backend.

virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const

Return the preferred vector type legalization action.

unsigned MaxStoresPerMemsetOptSize

Likewise for functions with the OptSize attribute.

void setBooleanContents(BooleanContent Ty)

Specify how the target extends the result of integer and floating point boolean values from i1 to a w...

unsigned MaxStoresPerMemmove

Specify maximum number of store instructions per memmove call.

void computeRegisterProperties(const TargetRegisterInfo *TRI)

Once all of the register classes are added, this allows us to compute derived properties we expose.

unsigned MaxStoresPerMemmoveOptSize

Likewise for functions with the OptSize attribute.

void addRegisterClass(MVT VT, const TargetRegisterClass *RC)

Add the specified register class as an available regclass for the specified value type.

virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const

Return the pointer type for the given address space, defaults to the pointer type from the data layou...

unsigned MaxStoresPerMemset

Specify maximum number of store instructions per memset call.

void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified truncating store does not work with the specified type and indicate what ...

@ ZeroOrNegativeOneBooleanContent

void setMinCmpXchgSizeInBits(unsigned SizeInBits)

Sets the minimum cmpxchg or ll/sc size supported by the backend.

void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)

If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...

AtomicExpansionKind

Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.

void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)

Indicate that the specified condition code is or isn't supported on the target and indicate what to d...

void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)

Targets should invoke this method for each target independent node that they want to provide a custom...

Align getMinStackArgumentAlignment() const

Return the minimum stack alignment of an argument.

void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified load with extension does not work with the specified type and indicate wh...

std::vector< ArgListEntry > ArgListTy

bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const

This function returns true if the memory access is aligned or if the target allows this specific unal...

unsigned MaxStoresPerMemcpy

Specify maximum number of store instructions per memcpy call.

void setSchedulingPreference(Sched::Preference Pref)

Specify the target scheduling preference.

void setJumpIsExpensive(bool isExpensive=true)

Tells the code generator not to expand logic operations on comparison predicates into separate sequen...

LegalizeAction getOperationAction(unsigned Op, EVT VT) const

Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
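
These hooks are normally called from a target's TargetLowering constructor, in a fixed order: register classes first, then per-operation actions, then derived properties. A minimal sketch (the XYZ register classes and Subtarget member are hypothetical, not NVPTX's actual configuration):

// Inside a hypothetical XYZTargetLowering constructor:
addRegisterClass(MVT::i32, &XYZ::GPR32RegClass); // assumed regclass
addRegisterClass(MVT::f32, &XYZ::FPR32RegClass); // assumed regclass

setOperationAction(ISD::SDIV, MVT::i32, Expand);      // no native divide
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); // lowered by hand
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
setBooleanContents(ZeroOrNegativeOneBooleanContent);
setSchedulingPreference(Sched::RegPressure);

// Must run after every addRegisterClass call.
computeRegisterProperties(Subtarget.getRegisterInfo());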

This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...

SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const

Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.

virtual ConstraintType getConstraintType(StringRef Constraint) const

Given a constraint, return the type of constraint it is for this target.

std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const

Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
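
allowsMemoryAccessForAlignment and the expandUnaligned* helpers are commonly paired when legalizing memory operations. A minimal sketch, assuming a LowerSTORE-style hook (lowerStoreExample is illustrative only):

// Fall back to the generic expansion when the target cannot perform
// this store with the given alignment.
static SDValue lowerStoreExample(SDValue Op, SelectionDAG &DAG,
                                 const TargetLowering &TLI) {
  auto *ST = cast<StoreSDNode>(Op.getNode());
  EVT MemVT = ST->getMemoryVT();
  if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(),
                                          DAG.getDataLayout(), MemVT,
                                          ST->getAddressSpace(),
                                          ST->getAlign()))
    return TLI.expandUnalignedStore(ST, DAG); // two or more smaller stores
  return SDValue(); // supported as-is: keep the original node
}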

virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const

Given a physical register constraint (e.g.

SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const

Truncate Op to ResultVT.

SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const

Expand round(fp) to fp conversion.

virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const

Lower the specified operand into the Ops vector.

Primary interface to the complete machine description for the target machine.

CodeGenOptLevel getOptLevel() const

Returns the optimization level: None, Less, Default, or Aggressive.

MCSymbol * getSymbol(const GlobalValue *GV) const

unsigned UnsafeFPMath

UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...

FPOpFusion::FPOpFusionMode AllowFPOpFusion

AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

The instances of the Type class are immutable: once they are created, they are never changed.

bool isVectorTy() const

True if this is an instance of VectorType.

bool isFloatTy() const

Return true if this is 'float', a 32-bit IEEE fp type.

bool isBFloatTy() const

Return true if this is 'bfloat', a 16-bit bfloat type.

@ VoidTyID

type with no size

bool isAggregateType() const

Return true if the type is an aggregate type.

bool isHalfTy() const

Return true if this is 'half', a 16-bit IEEE fp type.

bool isDoubleTy() const

Return true if this is 'double', a 64-bit IEEE fp type.

bool isFloatingPointTy() const

Return true if this is one of the floating-point types.

bool isIntegerTy() const

True if this is an instance of IntegerType.

TypeID getTypeID() const

Return the type id for the type.

TypeSize getPrimitiveSizeInBits() const LLVM_READONLY

Return the basic size of this type if it is a primitive type.
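
Lowering code leans on these predicates to classify scalars before choosing a register width. A small sketch of the dispatch idiom (classifyExample and its width choices are illustrative, not this file's rules):

// Return the bit width a hypothetical target would use for Ty's scalar,
// or 0 when Ty needs recursive (aggregate/vector) handling.
static unsigned classifyExample(const Type *Ty) {
  if (Ty->isVectorTy() || Ty->isAggregateType())
    return 0;
  if (Ty->isHalfTy() || Ty->isBFloatTy())
    return 16;
  if (Ty->isFloatTy())
    return 32;
  if (Ty->isDoubleTy())
    return 64;
  if (Ty->isIntegerTy())
    return Ty->getPrimitiveSizeInBits().getFixedValue();
  return 0;
}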

StringRef save(const char *S)

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

int getNumOccurrences() const

A raw_ostream that writes to an std::string.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

APInt pow(const APInt &X, int64_t N)

Compute X^N for N>=0.

@ C

The default llvm calling convention, compatible with C.

NodeType

ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.

@ SETCC

SetCC operator - This evaluates to a true value iff the condition is true.

@ STACKRESTORE

STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.

@ STACKSAVE

STACKSAVE - STACKSAVE has one operand, an input chain.

@ SMUL_LOHI

SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...

@ BSWAP

Byte Swap and Counting operators.

@ VAEND

VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.

@ ADDC

Carry-setting nodes for multiple precision addition and subtraction.

@ ADD

Simple integer binary arithmetic operators.

@ LOAD

LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...

@ ANY_EXTEND

ANY_EXTEND - Used for integer types. The high bits are undefined.

@ FMA

FMA - Perform a * b + c with no intermediate rounding step.

@ INTRINSIC_VOID

OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...

@ SINT_TO_FP

[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...

@ CONCAT_VECTORS

CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...

@ FADD

Simple binary floating point operators.

@ ABS

ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.

@ SDIVREM

SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.

@ BITCAST

BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...

@ BUILD_PAIR

BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.

@ SIGN_EXTEND

Conversion operators.

@ READSTEADYCOUNTER

READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.

@ FNEG

Perform various unary floating-point operations inspired by libm.

@ BR_CC

BR_CC - Conditional branch.

@ SSUBO

Same for subtraction.

@ BRIND

BRIND - Indirect branch.

@ BR_JT

BR_JT - Jumptable branch.

@ SSUBSAT

RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...

@ SELECT

Select(COND, TRUEVAL, FALSEVAL).

@ UNDEF

UNDEF - An undefined node.

@ VACOPY

VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...

@ CopyFromReg

CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...

@ SADDO

RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.

@ MULHU

MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...

@ SHL

Shift and rotation operations.

@ VECTOR_SHUFFLE

VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.

@ EXTRACT_SUBVECTOR

EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.

@ FMINNUM_IEEE

FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...

@ EXTRACT_VECTOR_ELT

EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...

@ CopyToReg

CopyToReg - This node has three operands: a chain, a register number to set to this value,...

@ ZERO_EXTEND

ZERO_EXTEND - Used for integer types, zeroing the new bits.

@ DEBUGTRAP

DEBUGTRAP - Trap intended to get the attention of a debugger.

@ SELECT_CC

Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...

@ FMINNUM

FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.

@ SSHLSAT

RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.

@ SMULO

Same for multiplication.

@ DYNAMIC_STACKALLOC

DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.

@ SIGN_EXTEND_INREG

SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...

@ SMIN

[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.

@ FP_EXTEND

X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.

@ VSELECT

Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...

@ UADDO_CARRY

Carry-using nodes for multiple precision addition and subtraction.

@ BF16_TO_FP

BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.

@ FRAMEADDR

FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.

@ FMINIMUM

FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.

@ FP_TO_SINT

FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.

@ READCYCLECOUNTER

READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.

@ AND

Bitwise operators - logical and, logical or, logical xor.

@ TRAP

TRAP - Trapping instruction.

@ INTRINSIC_WO_CHAIN

RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...

@ ADDE

Carry-using nodes for multiple precision addition and subtraction.

@ FREEZE

FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...

@ INSERT_VECTOR_ELT

INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.

@ TokenFactor

TokenFactor - This node takes multiple tokens as input and produces a single token result.

@ FP_ROUND

X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...

@ TRUNCATE

TRUNCATE - Completely drop the high bits.

@ VAARG

VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.

@ SHL_PARTS

SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.

@ FCOPYSIGN

FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.

@ SADDSAT

RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...

@ SADDO_CARRY

Carry-using overflow-aware nodes for multiple precision addition and subtraction.

@ INTRINSIC_W_CHAIN

RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...

@ BUILD_VECTOR

BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...

bool allOperandsUndef(const SDNode *N)

Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
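
allOperandsUndef gives DAG combines a cheap early-out. For instance (DAG and N assumed in scope; a sketch only):

// Replace a BUILD_VECTOR whose operands are all undef with a single UNDEF.
if (N->getOpcode() == ISD::BUILD_VECTOR && ISD::allOperandsUndef(N))
  return DAG.getUNDEF(N->getValueType(0));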

@ Bitcast

Perform the operation on a different, but equivalently sized type.

initializer< Ty > init(const Ty &Val)

This is an optimization pass for GlobalISel generic memory operations.

static bool isIndirectCall(const MachineInstr &MI)

bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)

Get the size of a range.

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...

MaybeAlign getAlign(const Function &F, unsigned Index)

uint64_t PowerOf2Ceil(uint64_t A)

Returns the power of two which is greater than or equal to the given value.

OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)

Wrapper function around std::transform to apply a function to a range and store the result elsewhere.

constexpr bool isPowerOf2_32(uint32_t Value)

Return true if the argument is a power of two > 0.

unsigned promoteScalarArgumentSize(unsigned size)

void report_fatal_error(Error Err, bool gen_crash_diag=true)

Report a serious error, calling any installed error handler.

CodeGenOptLevel

Code generation optimization level.

@ Mul

Product of integers.

uint64_t alignTo(uint64_t Size, Align A)

Returns a multiple of A needed to store Size bytes.

DWARFExpression::Operation Op

void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())

ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...

constexpr unsigned BitWidth

bool isKernelFunction(const Function &F)

Function * getMaybeBitcastedCallee(const CallBase *CB)

Align commonAlignment(Align A, uint64_t Offset)

Returns the alignment that satisfies both alignments.
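
alignTo, PowerOf2Ceil, and commonAlignment typically combine when laying out byte offsets. A minimal sketch (layoutExample is hypothetical and assumes non-zero sizes):

// Pack fields at naturally aligned offsets, starting from alignment Base.
static uint64_t layoutExample(ArrayRef<uint64_t> Sizes, Align Base) {
  uint64_t Offset = 0;
  for (uint64_t Size : Sizes) {
    // Natural alignment: next power of two, capped at 16 bytes.
    Align A(PowerOf2Ceil(std::min<uint64_t>(Size, 16)));
    Offset = alignTo(Offset, A); // round up to a multiple of A
    // Alignment actually available at Base + Offset:
    Align Avail = commonAlignment(Base, Offset);
    (void)Avail; // e.g. gate a vectorized access on Avail >= A
    Offset += Size;
  }
  return Offset;
}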

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

static const fltSemantics & IEEEsingle() LLVM_READNONE

This struct is a compact representation of a valid (non-zero power of two) alignment.

uint64_t value() const

This is a hole in the type system and should not be abused.

@ PreserveSign

The sign of a flushed-to-zero number is preserved in the sign of 0.

DenormalModeKind Output

Denormal flushing mode for floating point instruction results in the default floating point environme...

TypeSize getStoreSize() const

Return the number of bytes overwritten by a store of the specified value type.

bool isSimple() const

Test if the given EVT is simple (as opposed to being extended).

static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)

Returns the EVT that represents a vector NumElements in length, where each element is of type VT.

EVT changeTypeToInteger() const

Return the type converted to an equivalently sized integer or vector with integer element type.

bool isFloatingPoint() const

Return true if this is a FP or a vector FP type.

ElementCount getVectorElementCount() const

TypeSize getSizeInBits() const

Return the size of the specified value type in bits.

uint64_t getScalarSizeInBits() const

MVT getSimpleVT() const

Return the SimpleValueType held in the specified simple EVT.

uint64_t getFixedSizeInBits() const

Return the size of the specified fixed width value type in bits.

bool isVector() const

Return true if this is a vector value type.

EVT getScalarType() const

If this is a vector type, return the element type, otherwise return this.

bool bitsEq(EVT VT) const

Return true if this has the same number of bits as VT.

Type * getTypeForEVT(LLVMContext &Context) const

This method returns an LLVM type corresponding to the specified EVT.

EVT getVectorElementType() const

Given a vector type, return the type of each element.

bool isScalarInteger() const

Return true if this is an integer, but not a vector.

EVT changeVectorElementType(EVT EltVT) const

Return a VT for a vector type whose attributes match ourselves with the exception of the element type...

unsigned getVectorNumElements() const

Given a vector type, return the number of elements it contains.

bool isInteger() const

Return true if this is an integer or a vector integer type.
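
These EVT queries are the building blocks of vector-splitting logic. A minimal sketch that halves a vector type when its element count allows it (halfVectorVT is illustrative):

// Return the EVT for half of VT, or std::nullopt when VT cannot be split.
static std::optional<EVT> halfVectorVT(LLVMContext &Ctx, EVT VT) {
  if (!VT.isVector() || VT.isScalableVector())
    return std::nullopt;
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts < 2 || !isPowerOf2_32(NumElts))
    return std::nullopt;
  return EVT::getVectorVT(Ctx, VT.getVectorElementType(), NumElts / 2);
}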

This class contains a discriminated union of information about pointers in memory operands,...

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

This represents a list of ValueType's that has been intern'd by a SelectionDAG.

This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...

This structure contains all information that is necessary for lowering calls.

SmallVector< ISD::InputArg, 32 > Ins

SmallVector< ISD::OutputArg, 32 > Outs

SmallVector< SDValue, 32 > OutVals

bool isAfterLegalizeDAG() const

SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)