LLVM: lib/Target/NVPTX/NVPTXISelLowering.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

51#include "llvm/IR/IntrinsicsNVPTX.h"

66#include

67#include

68#include

69#include

70#include

71#include

72#include

73#include

74#include

75#include

76

77#define DEBUG_TYPE "nvptx-lower"

78

79using namespace llvm;

80

82 "nvptx-sched4reg",

83 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));

84

87 cl::desc("NVPTX Specific: FMA contraction (0: don't do it"

88 " 1: do it 2: do it aggressively"),

90

94 "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),

99 "Use IEEE Compliant F32 div.rnd if available (default)"),

101 "Use IEEE Compliant F32 div.rnd if available, no FTZ")),

103

106 cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),

108

109

110

112 "nvptx-approx-log2f32",

113 cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),

115

117 "nvptx-force-min-byval-param-align", cl::Hidden,

118 cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval"

119 " params of device functions."),

121

125

128

130 if (Flags.hasApproximateFuncs())

132

134}

135

137

140

141 if (N) {

143 if (Flags.hasApproximateFuncs())

144 return false;

145 }

146

147 return true;

148}

149

154

157 default:

158 return false;

159 case MVT::v2i1:

160 case MVT::v4i1:

161 case MVT::v2i8:

162 case MVT::v4i8:

163 case MVT::v8i8:

164 case MVT::v16i8:

165 case MVT::v2i16:

166 case MVT::v4i16:

167 case MVT::v8i16:

168 case MVT::v2i32:

169 case MVT::v4i32:

170 case MVT::v2i64:

171 case MVT::v2f16:

172 case MVT::v4f16:

173 case MVT::v8f16:

174 case MVT::v2bf16:

175 case MVT::v4bf16:

176 case MVT::v8bf16:

177 case MVT::v2f32:

178 case MVT::v4f32:

179 case MVT::v2f64:

180 case MVT::v4i64:

181 case MVT::v4f64:

182 case MVT::v8i32:

183 case MVT::v8f32:

184 case MVT::v16f16:

185 case MVT::v16bf16:

186 case MVT::v16i16:

187 case MVT::v32i8:

188 return true;

189 }

190}

191

192

193

194

195

196

197

198

199static std::optional<std::pair<unsigned int, MVT>>

203

206 return {{4, MVT::i64}};

207

209 return std::nullopt;

211

213 if (VectorVT == MVT::i128 || VectorVT == MVT::f128)

214 return {{2, MVT::i64}};

215 return std::nullopt;

216 }

217

220

221

222 unsigned PackRegSize;

223

224

225

226

228 default:

229 return std::nullopt;

230

231 case MVT::v4i64:

232 case MVT::v4f64:

233

234

235 if (!CanLowerTo256Bit)

236 return std::nullopt;

237 [[fallthrough]];

238 case MVT::v2i8:

239 case MVT::v2i64:

240 case MVT::v2f64:

241

242 return std::pair(NumElts, EltVT);

243

244 case MVT::v16f16:

245 case MVT::v16bf16:

246 case MVT::v16i16:

247 case MVT::v32i8:

248

249

250 if (!CanLowerTo256Bit)

251 return std::nullopt;

252 [[fallthrough]];

253 case MVT::v2i16:

254 case MVT::v2f16:

255 case MVT::v2bf16:

256 case MVT::v4i8:

257 case MVT::v4i16:

258 case MVT::v4f16:

259 case MVT::v4bf16:

260 case MVT::v8i8:

261 case MVT::v8f16:

262 case MVT::v8bf16:

263 case MVT::v8i16:

264 case MVT::v16i8:

265 PackRegSize = 32;

266 break;

267

268 case MVT::v8f32:

269 case MVT::v8i32:

270

271

272 if (!CanLowerTo256Bit)

273 return std::nullopt;

274 [[fallthrough]];

275 case MVT::v2f32:

276 case MVT::v4f32:

277 case MVT::v2i32:

278 case MVT::v4i32:

280 return std::pair(NumElts, EltVT);

281 PackRegSize = 64;

282 break;

283 }

284

285

286

287

288

289

290 const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();

291

292 return std::pair(NumElts / NPerReg, MVT::getVectorVT(EltVT, NPerReg));

293}

294

295

296

297

298

299

300

301

306 uint64_t StartingOffset = 0) {

309 ComputeValueVTs(TLI, DL, Ty, TempVTs, nullptr, &TempOffsets,

310 StartingOffset);

311

312 for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {

315

316

317

318 if (VT.getScalarType() == MVT::i8) {

319 if (RegisterVT == MVT::i16)

320 RegisterVT = MVT::i8;

321 else if (RegisterVT == MVT::v2i16)

322 RegisterVT = MVT::v2i8;

323 else

324 assert(RegisterVT == MVT::v4i8 &&

325 "Expected v4i8, v2i16, or i16 for i8 RegisterVT");

326 }

327

328

329

330

331

332 for (unsigned I : seq(NumRegs)) {

334 Offsets.push_back(Off + I * RegisterVT.getStoreSize());

335 }

336 }

337}

338

339

340

341

343 if (N == 1)

344 return VT;

345

349}

350

353 if (V.getValueType() == VT) {

354 assert(I == 0 && "Index must be 0 for scalar value");

355 return V;

356 }

357

361

365}

366

367template

370 if (N == 1)

371 return GetElement(0);

372

378 else

380 }

381

383 Values.size());

385}

386

387

388

389

390

391

395 default:

397 "Promotion is not suitable for scalars of size larger than 64-bits");

398 case 1:

399 return MVT::i1;

400 case 2:

401 case 4:

402 case 8:

403 return MVT::i8;

404 case 16:

405 return MVT::i16;

406 case 32:

407 return MVT::i32;

408 case 64:

409 return MVT::i64;

410 }

411 }

412 return VT;

413}

414

415

416

417

418

419

420

421

422

423

424

425template

429

430

431 if (ParamAlignment < AccessSize)

432 return 1;

433

434 if (Offsets[Idx] & (AccessSize - 1))

435 return 1;

436

437 EVT EltVT = ValueVTs[Idx];

439

440

441 if (EltSize >= AccessSize)

442 return 1;

443

444 unsigned NumElts = AccessSize / EltSize;

445

446 if (AccessSize != EltSize * NumElts)

447 return 1;

448

449

450 if (Idx + NumElts > ValueVTs.size())

451 return 1;

452

453

454 if (NumElts != 4 && NumElts != 2)

455 return 1;

456

457 for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {

458

459 if (ValueVTs[j] != EltVT)

460 return 1;

461

462

463 if (Offsets[j] - Offsets[j - 1] != EltSize)

464 return 1;

465 }

466

467 return NumElts;

468}

469

470

471

472

473

474

475

476

477

478template

482 bool IsVAArg = false) {

483

484

485

486 if (IsVAArg)

488

490

491 const auto GetNumElts = [&](unsigned I) -> unsigned {

492 for (const unsigned AccessSize : {16, 8, 4, 2}) {

494 I, AccessSize, ValueVTs, Offsets, ParamAlignment);

495 assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&

496 "Unexpected vectorization size");

497 if (NumElts != 1)

498 return NumElts;

499 }

500 return 1;

501 };

502

503

504 for (unsigned I = 0, E = ValueVTs.size(); I != E;) {

505 const unsigned NumElts = GetNumElts(I);

506 VectorInfo.push_back(NumElts);

507 I += NumElts;

508 }

509 assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==

510 ValueVTs.size());

511 return VectorInfo;

512}

513

514

517 : TargetLowering(TM, STI), nvTM(&TM), STI(STI), GlobalUniqueCallSite(0) {

518

519

520

524

527

528

529

531

532

533

535

536

539 else

541

542 auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,

544 bool IsOpSupported = STI.allowFP16Math();

545 switch (Op) {

546

547 case ISD::FMINNUM:

548 case ISD::FMAXNUM:

549 case ISD::FMAXNUM_IEEE:

550 case ISD::FMINNUM_IEEE:

551 case ISD::FMAXIMUM:

552 case ISD::FMINIMUM:

553 case ISD::FMAXIMUMNUM:

554 case ISD::FMINIMUMNUM:

555 IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;

556 break;

557 case ISD::FEXP2:

558 IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70;

559 break;

560 }

562 };

563

564 auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,

566 bool IsOpSupported = STI.hasNativeBF16Support(Op);

568 Op, VT, IsOpSupported ? Action : NoBF16Action);

569 };

570

571 auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,

573 bool IsOpSupported = false;

574

575 switch (Op) {

581 IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;

582 break;

583 }

585 };

586

599

600 if (STI.hasF32x2Instructions()) {

603 }

604

605

610

612 if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)

614

617

618

623

628

629

634

639

640

643

647

648

649 if (STI.hasF32x2Instructions())

652

653

655

656

657

674 {MVT::v4i8, MVT::v2i32}, Expand);

675

676

677 for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,

678 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,

679 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {

682 }

683

684

686

687

688

695

702

705

707 {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},

709

710 if (STI.hasHWROT32()) {

714 }

715

718

719

720

722

723

724 for (auto FloatVTs :

726 for (MVT ValVT : FloatVTs) {

727 for (MVT MemVT : FloatVTs) {

730 }

731 }

732 }

733

734

735

736

737 for (auto IntVTs :

739 for (MVT ValVT : IntVTs)

740 for (MVT MemVT : IntVTs)

743

744

750 }

751

752

753

754

755

756

757

761 {MVT::v2i8, MVT::v2i16}, Expand);

765

766

767

768

769 setOperationAction({ISD::LOAD, ISD::STORE}, {MVT::i128, MVT::i256, MVT::f128},

772 if (isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)

773 setOperationAction({ISD::STORE, ISD::LOAD, ISD::MSTORE, ISD::MLOAD}, VT,

775

776

777

778

783

788

789

794

797

798

800

802

803

808

810 {MVT::i16, MVT::i32, MVT::i64}, Legal);

811

816

824

831

832

836 {MVT::v2i16, MVT::v2i32}, Expand);

837

838

844

849 if (STI.getPTXVersion() >= 43) {

854 }

855

860

861

863

864

867

868

871 ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM,

872 ISD::FMAXIMUM, ISD::FMINIMUM, ISD::FMAXIMUMNUM,

877

878

879

880 if (STI.allowFP16Math() || STI.hasBF16Math())

882

883

884

887 if (EltVT == MVT::f32 || EltVT == MVT::f64) {

889 ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM},

891 }

892 }

893

894

895

896

897

898

899

901 setFP16OperationAction(Op, MVT::f16, Legal, Promote);

902 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);

903 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);

904

905 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);

909 STI.hasF32x2Instructions() ? Legal : Expand);

910 }

911

912

914 for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {

915 if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) {

917 }

918 }

919 }

920

921

922 const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&

923 STI.getPTXVersion() >= 60 &&

924 STI.allowFP16Math();

925 for (const auto &VT : {MVT::f16, MVT::v2f16})

927 IsFP16FP16x2NegAvailable ? Legal : Expand);

928

929 setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);

930 setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);

932

933

934

935 for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,

936 ISD::FROUNDEVEN, ISD::FTRUNC}) {

943 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);

946 }

947

948 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {

950 }

951 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {

952 for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {

955 }

956 }

957

958

960

962

963

964

965 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {

966 for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {

970 }

974 }

975

983

984

991

992

993

994

995 for (const auto &Op :

996 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FTANH}) {

999

1002 }

1006 }

1008

1011 if (STI.getPTXVersion() >= 65) {

1012 setFP16OperationAction(ISD::FABS, MVT::f16, Legal, Promote);

1013 setFP16OperationAction(ISD::FABS, MVT::v2f16, Legal, Expand);

1014 } else {

1017 }

1018 setBF16OperationAction(ISD::FABS, MVT::v2bf16, Legal, Expand);

1019 setBF16OperationAction(ISD::FABS, MVT::bf16, Legal, Promote);

1022

1023 for (const auto &Op :

1024 {ISD::FMINNUM, ISD::FMAXNUM, ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM}) {

1027 setFP16OperationAction(Op, MVT::f16, Legal, Promote);

1028 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);

1029 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);

1030 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);

1034 }

1035 bool SupportsF32MinMaxNaN =

1036 STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;

1037 for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {

1039 setFP16OperationAction(Op, MVT::f16, Legal, Expand);

1040 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);

1041 setBF16OperationAction(Op, MVT::bf16, Legal, Expand);

1042 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);

1044 }

1045

1046

1049

1050

1051

1052

1053

1054

1057 setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote);

1058 setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);

1059 setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);

1060 setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand);

1061

1062

1063

1068 setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32},

1070 }

1071

1073

1075

1076

1077

1078 setOperationAction({ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP}, MVT::i128,

1080

1081

1082

1084

1085

1089

1090

1092 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,

1093 MVT::v32i32, MVT::v64i32, MVT::v128i32},

1095

1096

1098 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,

1099 MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other},

1101

1102

1103

1104

1105

1106

1108 {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);

1109

1110

1113}

1114

1122

1124 int Enabled, int &ExtraSteps,

1125 bool &UseOneConst,

1126 bool Reciprocal) const {

1130

1132 ExtraSteps = 0;

1133

1137

1138 auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {

1141 };

1142

1143

1144

1145

1146

1147 if (Reciprocal || ExtraSteps > 0) {

1148 if (VT == MVT::f32)

1149 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f

1150 : Intrinsic::nvvm_rsqrt_approx_f);

1151 else if (VT == MVT::f64)

1152 return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);

1153 else

1155 } else {

1156 if (VT == MVT::f32)

1157 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f

1158 : Intrinsic::nvvm_sqrt_approx_f);

1159 else {

1160

1161

1162

1163

1166 DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),

1167 MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));

1168 }

1169 }

1170}

1171

1175 std::optional FirstVAArg, const CallBase &CB,

1176 unsigned UniqueCallSite) const {

1178

1179 std::string Prototype;

1181 O << "prototype_" << UniqueCallSite << " : .callprototype ";

1182

1184 O << "()";

1185 } else {

1186 O << "(";

1188 const Align RetAlign = getArgumentAlignment(&CB, RetTy, 0, DL);

1189 O << ".param .align " << RetAlign.value() << " .b8 _["

1190 << DL.getTypeAllocSize(RetTy) << "]";

1192 unsigned size = 0;

1194 size = ITy->getBitWidth();

1195 } else {

1197 "Floating point type expected here");

1199 }

1200

1201

1202

1204

1205 O << ".param .b" << size << " _";

1207 O << ".param .b" << PtrVT.getSizeInBits() << " _";

1208 } else {

1210 }

1211 O << ") ";

1212 }

1213 O << "_ (";

1214

1215 bool first = true;

1216

1217 const unsigned NumArgs = FirstVAArg.value_or(Args.size());

1218 auto AllOuts = ArrayRef(Outs);

1219 for (const unsigned I : llvm::seq(NumArgs)) {

1220 const auto ArgOuts =

1221 AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });

1222 AllOuts = AllOuts.drop_front(ArgOuts.size());

1223

1224 Type *Ty = Args[I].Ty;

1225 if (!first) {

1226 O << ", ";

1227 }

1228 first = false;

1229

1230 if (ArgOuts[0].Flags.isByVal()) {

1231

1232

1233 Type *ETy = Args[I].IndirectType;

1234 Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();

1235 Align ParamByValAlign =

1237

1238 O << ".param .align " << ParamByValAlign.value() << " .b8 _["

1239 << ArgOuts[0].Flags.getByValSize() << "]";

1240 } else {

1242 Align ParamAlign =

1243 getArgumentAlignment(&CB, Ty, I + AttributeList::FirstArgIndex, DL);

1244 O << ".param .align " << ParamAlign.value() << " .b8 _["

1245 << DL.getTypeAllocSize(Ty) << "]";

1246 continue;

1247 }

1248

1250 (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&

1251 "type mismatch between callee prototype and arguments");

1252

1253 unsigned sz = 0;

1257 sz = PtrVT.getSizeInBits();

1258 } else {

1259 sz = Ty->getPrimitiveSizeInBits();

1260 }

1261 O << ".param .b" << sz << " _";

1262 }

1263 }

1264

1265 if (FirstVAArg)

1266 O << (first ? "" : ",") << " .param .align "

1267 << STI.getMaxRequiredAlignment() << " .b8 _[]";

1268 O << ")";

1270 O << " .noreturn";

1271 O << ";";

1272

1273 return Prototype;

1274}

1275

1280

1281Align NVPTXTargetLowering::getArgumentAlignment(const CallBase *CB, Type *Ty,

1282 unsigned Idx,

1284 if (!CB) {

1285

1286 return DL.getABITypeAlign(Ty);

1287 }

1288

1290

1291 if (!DirectCallee) {

1292

1293

1294

1295

1297

1299 return StackAlign.value();

1300 }

1302 }

1303

1304

1305

1306 if (DirectCallee)

1308

1309

1310 return DL.getABITypeAlign(Ty);

1311}

1312

1315 if (!Func)

1316 return false;

1318 return CB->getFunctionType() != CalleeFunc->getFunctionType();

1319 return false;

1320}

1321

1329

1331 }

1332

1333

1334

1335 if (Ptr->getOpcode() == ISD::ADDRSPACECAST) {

1340 }

1341 }

1342

1344}

1345

1347 if (Flags.isSExt())

1349 if (Flags.isZExt())

1352}

1353

1357 const EVT ActualVT = V.getValueType();

1358 assert((ActualVT == ExpectedVT ||

1360 "Non-integer argument type size mismatch");

1361 if (ExpectedVT.bitsGT(ActualVT))

1363 if (ExpectedVT.bitsLT(ActualVT))

1365

1366 return V;

1367}

1368

1371

1372 if (CLI.IsVarArg && (STI.getPTXVersion() < 60 || STI.getSmVersion() < 30))

1374 "Support for variadic functions (unsized array parameter) introduced "

1375 "in PTX ISA version 6.0 and requires target sm_30.");

1376

1386

1387 const auto GetI32 = [&](const unsigned I) {

1389 };

1390

1391 const unsigned UniqueCallSite = GlobalUniqueCallSite++;

1393 const SDValue StartChain =

1396

1398

1399 const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {

1400

1401

1404 DAG.getNode(NVPTXISD::DeclareScalarParam, dl, {MVT::Other, MVT::Glue},

1405 {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});

1407 DeclareGlue = Declare.getValue(1);

1408 return Declare;

1409 };

1410

1411 const auto MakeDeclareArrayParam = [&](SDValue Symbol, Align Align,

1412 unsigned Size) {

1414 NVPTXISD::DeclareArrayParam, dl, {MVT::Other, MVT::Glue},

1415 {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});

1417 DeclareGlue = Declare.getValue(1);

1418 return Declare;

1419 };

1420

1421

1422

1423

1424

1425

1426

1427

1428

1429

1430

1431

1432

1433

1434

1435

1437 "Non-VarArg function with extra arguments");

1438

1439 const unsigned FirstVAArg = CLI.NumFixedArgs;

1440 unsigned VAOffset = 0;

1441

1442 const SDValue VADeclareParam =

1443 CLI.Args.size() > FirstVAArg

1444 ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),

1445 Align(STI.getMaxRequiredAlignment()), 0)

1447

1448

1449

1450

1451

1452

1453

1454

1455

1456

1459 assert(AllOuts.size() == AllOutVals.size() &&

1460 "Outs and OutVals must be the same size");

1461

1462

1464 const auto ArgI = E.index();

1465 const auto Arg = E.value();

1466 const auto ArgOuts =

1467 AllOuts.take_while([&](auto O) { return O.OrigArgIndex == ArgI; });

1468 const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());

1469 AllOuts = AllOuts.drop_front(ArgOuts.size());

1470 AllOutVals = AllOutVals.drop_front(ArgOuts.size());

1471

1472 const bool IsVAArg = (ArgI >= FirstVAArg);

1473 const bool IsByVal = Arg.IsByVal;

1474

1475 const SDValue ParamSymbol =

1476 getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);

1477

1478 assert((!IsByVal || Arg.IndirectType) &&

1479 "byval arg must have indirect type");

1480 Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);

1481

1482 const Align ArgAlign = [&]() {

1483 if (IsByVal) {

1484

1485

1486

1487 const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();

1489 InitialAlign, DL);

1490 }

1491 return getArgumentAlignment(CB, Arg.Ty, ArgI + 1, DL);

1492 }();

1493

1494 const unsigned TySize = DL.getTypeAllocSize(ETy);

1495 assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&

1496 "type size mismatch");

1497

1498 const SDValue ArgDeclare = [&]() {

1499 if (IsVAArg)

1500 return VADeclareParam;

1501

1503 return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);

1504

1505 assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");

1506 assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&

1507 "Only int and float types are supported as non-array arguments");

1508

1509 return MakeDeclareScalarParam(ParamSymbol, TySize);

1510 }();

1511

1512 if (IsByVal) {

1513 assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");

1514 SDValue SrcPtr = ArgOutVals[0];

1515 const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);

1516 const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();

1517

1518 if (IsVAArg)

1519 VAOffset = alignTo(VAOffset, ArgAlign);

1520

1524

1525 unsigned J = 0;

1527 for (const unsigned NumElts : VI) {

1532 DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);

1533

1534 TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);

1539 DAG.getStore(ArgDeclare, dl, SrcLoad, ParamAddr,

1541 CallPrereqs.push_back(StoreParam);

1542

1543 J += NumElts;

1544 }

1545 if (IsVAArg)

1546 VAOffset += TySize;

1547 } else {

1551 VAOffset);

1552 assert(VTs.size() == Offsets.size() && "Size mismatch");

1553 assert(VTs.size() == ArgOuts.size() && "Size mismatch");

1554

1555

1556

1557

1558

1559 const bool ExtendIntegerParam =

1560 Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;

1561

1562 const auto GetStoredValue = [&](const unsigned I) {

1563 SDValue StVal = ArgOutVals[I];

1566 "OutVal type should always be legal");

1567

1569 const EVT StoreVT =

1570 ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);

1571

1572 return correctParamType(StVal, StoreVT, ArgOuts[I].Flags, DAG, dl);

1573 };

1574

1575 unsigned J = 0;

1577 for (const unsigned NumElts : VI) {

1579

1581 if (IsVAArg) {

1582

1583

1584 assert(NumElts == 1 &&

1585 "Vectorization should be disabled for vaargs.");

1586

1587

1590

1591 const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;

1592 VAOffset += DL.getTypeAllocSize(TheStoreType.getTypeForEVT(Ctx));

1593 } else {

1594 assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");

1595 Offset = Offsets[J];

1596 }

1597

1600

1601 const MaybeAlign CurrentAlign = ExtendIntegerParam

1604

1607 return GetStoredValue(J + K);

1608 });

1609

1611 DAG.getStore(ArgDeclare, dl, Val, Ptr,

1613 CallPrereqs.push_back(StoreParam);

1614

1615 J += NumElts;

1616 }

1617 }

1618 }

1619

1620

1621 if (!Ins.empty()) {

1623 const unsigned ResultSize = DL.getTypeAllocSize(RetTy);

1625 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);

1626 MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);

1627 } else {

1628 MakeDeclareScalarParam(RetSymbol, ResultSize);

1629 }

1630 }

1631

1632

1633

1634 if (VADeclareParam) {

1637 VADeclareParam.getOperand(2), GetI32(VAOffset),

1640 VADeclareParam->getVTList(), DeclareParamOps);

1641 }

1642

1644

1645

1647

1648

1649

1650

1651 const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;

1652

1654 Function* CalleeFunc = nullptr;

1655

1656

1658 assert(CalleeFunc != nullptr && "Libcall callee must be set.");

1659

1660

1661

1662 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");

1663 }

1664

1665 if (IsIndirectCall) {

1666

1667

1668

1669

1670

1671

1672

1674 std::string Proto =

1676 HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,

1677 UniqueCallSite);

1678 const char *ProtoStr = nvTM->getStrPool().save(Proto).data();

1680 NVPTXISD::CallPrototype, dl, MVT::Other,

1682 CallPrereqs.push_back(PrototypeDeclare);

1683 }

1684

1685 const unsigned Proto = IsIndirectCall ? UniqueCallSite : 0;

1686 const unsigned NumArgs =

1687 std::min(CLI.NumFixedArgs + 1, Args.size());

1688

1689

1692 NVPTXISD::CALL, dl, MVT::Other,

1693 {CallToken, GetI32(CLI.IsConvergent), GetI32(IsIndirectCall),

1694 GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});

1695

1698 if (!Ins.empty()) {

1702 assert(VTs.size() == Ins.size() && "Bad value decomposition");

1703

1704 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);

1706

1707

1708

1709

1710 const bool ExtendIntegerRetVal =

1711 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;

1712

1713 unsigned I = 0;

1715 for (const unsigned NumElts : VI) {

1717 ExtendIntegerRetVal ? MaybeAlign(std::nullopt)

1719

1721 const EVT LoadVT =

1722 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);

1726

1730

1731 LoadChains.push_back(R.getValue(1));

1732 for (const unsigned J : llvm::seq(NumElts))

1734 I += NumElts;

1735 }

1736 }

1737

1740 UniqueCallSite + 1, SDValue(), dl);

1741

1742

1743

1744

1747 DAG.getNode(NVPTXISD::ProxyReg, dl, Reg.getValueType(), {CallEnd, Reg});

1750 }

1751

1752

1753

1755 return CallEnd;

1756}

1757

1760

1761 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {

1763

1765 Fn,

1766 "Support for dynamic alloca introduced in PTX ISA version 7.3 and "

1767 "requires target sm_52.",

1770 Op.getOperand(0)};

1772 }

1773

1775 SDValue Chain = Op.getOperand(0);

1778

1779

1780

1783

1784

1786

1788 DAG.getNode(NVPTXISD::DYNAMIC_STACKALLOC, DL, {LocalVT, MVT::Other},

1791

1794

1796}

1797

1801 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {

1803

1805 Fn,

1806 "Support for stackrestore requires PTX ISA version >= 7.3 and target "

1807 ">= sm_52.",

1808 DL.getDebugLoc()));

1809 return Op.getOperand(0);

1810 }

1811

1813 SDValue Chain = Op.getOperand(0);

1817 return DAG.getNode(NVPTXISD::STACKRESTORE, DL, MVT::Other, {Chain, ASC});

1818}

1819

1823 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {

1825

1827 Fn,

1828 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "

1829 "sm_52.",

1830 DL.getDebugLoc()));

1833 }

1834

1836 SDValue Chain = Op.getOperand(0);

1838 DAG.getNode(NVPTXISD::STACKSAVE, DL, {LocalVT, MVT::Other}, Chain);

1842}

1843

1844

1845

1846

1852 unsigned NumOperands = Node->getNumOperands();

1853 for (unsigned i = 0; i < NumOperands; ++i) {

1855 EVT VVT = SubOp.getNode()->getValueType(0);

1858 for (unsigned j = 0; j < NumSubElem; ++j) {

1861 }

1862 }

1864}

1865

1869 assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&

1870 Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");

1871 return DAG.getNode(NVPTXISD::PRMT, DL, MVT::i32,

1873}

1874

1880

1881

1882

1883

1884

1885

1888 ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,

1890

1892

1893 unsigned OpIdx = 0;

1894 while (Level.size() > 1) {

1895

1896 const auto [Op, NumInputs] = Ops[OpIdx];

1897

1898

1900 unsigned I = 0, E = Level.size();

1901 for (; I + NumInputs <= E; I += NumInputs) {

1902

1905 }

1906

1907 if (I < E) {

1908

1909

1910 if (ReducedLevel.empty()) {

1911

1912

1914 assert(OpIdx < Ops.size() && "no smaller operators for reduction");

1915 continue;

1916 }

1917

1918

1919

1920

1921 for (; I < E; ++I)

1923 }

1924

1925

1926 Level = ReducedLevel;

1927 }

1928

1929 return *Level.begin();

1930}

1931

1932

1934 switch (ReductionOpcode) {

1935 case ISD::VECREDUCE_FMAX:

1936 return ISD::FMAXNUM;

1937 case ISD::VECREDUCE_FMIN:

1938 return ISD::FMINNUM;

1939 case ISD::VECREDUCE_FMAXIMUM:

1940 return ISD::FMAXIMUM;

1941 case ISD::VECREDUCE_FMINIMUM:

1942 return ISD::FMINIMUM;

1943 default:

1945 }

1946}

1947

1948

1949static std::optional

1951 switch (ReductionOpcode) {

1952 case ISD::VECREDUCE_FMAX:

1953 return NVPTXISD::FMAXNUM3;

1954 case ISD::VECREDUCE_FMIN:

1955 return NVPTXISD::FMINNUM3;

1956 case ISD::VECREDUCE_FMAXIMUM:

1957 return NVPTXISD::FMAXIMUM3;

1958 case ISD::VECREDUCE_FMINIMUM:

1959 return NVPTXISD::FMINIMUM3;

1960 default:

1961 return std::nullopt;

1962 }

1963}

1964

1965

1966

1967

1970 SDLoc DL(Op);

1971 const SDNodeFlags Flags = Op->getFlags();

1973

1974 const unsigned Opcode = Op->getOpcode();

1975 const EVT EltTy = Vector.getValueType().getVectorElementType();

1976

1977

1978 const bool CanUseMinMax3 =

1979 EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&

1980 STI.getPTXVersion() >= 88 &&

1981 (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN ||

1982 Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM);

1983

1984

1985

1986 SmallVector<std::pair<unsigned , unsigned >, 2> ScalarOps;

1987

1989 CanUseMinMax3 && Opcode3Elem)

1990 ScalarOps.push_back({*Opcode3Elem, 3});

1992

1995

1997}

1998

2000

2001

2002 EVT FromVT = Op->getOperand(0)->getValueType(0);

2003 if (FromVT != MVT::v2i8) {

2004 return Op;

2005 }

2006

2007

2008 SDLoc DL(Op);

2018 {Extend0, DAG.getNode(ISD::SHL, DL, MVT::i16, {Extend1, Const8})});

2019 EVT ToVT = Op->getValueType(0);

2021}

2022

2023

2024

2025

2026

2029 EVT VT = Op->getValueType(0);

2031 return Op;

2032 SDLoc DL(Op);

2033

2035 return Operand->isUndef() || isa(Operand) ||

2036 isa(Operand);

2037 })) {

2038 if (VT != MVT::v4i8)

2039 return Op;

2040

2041

2043 uint64_t SelectionValue) -> SDValue {

2046 if (Cast) {

2049 }

2050 return getPRMT(L, R, SelectionValue, DL, DAG);

2051 };

2052 auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);

2053 auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);

2054 auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);

2055 return DAG.getBitcast(VT, PRMT3210);

2056 }

2057

2058

2059 auto GetOperand = [](SDValue Op, int N) -> APInt {

2060 const SDValue &Operand = Op->getOperand(N);

2061 EVT VT = Op->getValueType(0);

2063 return APInt(32, 0);

2065 if (VT == MVT::v2f16 || VT == MVT::v2bf16)

2067 else if (VT == MVT::v2i16 || VT == MVT::v4i8)

2069 else

2071

2072

2073 if (VT == MVT::v4i8)

2075 return Value.zext(32);

2076 };

2077

2078

2079

2080

2081

2082

2083

2084

2085

2086

2087

2088

2089

2090

2091 APInt Value(32, 0);

2093 assert(32 % NumElements == 0 && "must evenly divide bit length");

2094 const unsigned ShiftAmount = 32 / NumElements;

2095 for (unsigned ElementNo : seq(NumElements))

2096 Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);

2098 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), Const);

2099}

2100

2101SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,

2105 SDLoc DL(Op);

2106 EVT VectorVT = Vector.getValueType();

2107

2108 if (VectorVT == MVT::v4i8) {

2115 SDNodeFlags Flags;

2119 return Ext;

2120 }

2121

2122

2124 return Op;

2125

2126

2130

2131 SDLoc dl(Op.getNode());

2138}

2139

2140SDValue NVPTXTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,

2143 EVT VectorVT = Vector.getValueType();

2144

2145 if (VectorVT != MVT::v4i8)

2146 return Op;

2147 SDLoc DL(Op);

2149 if (Value->isUndef())

2151

2153

2155 DAG.getNode(NVPTXISD::BFI, DL, MVT::i32,

2161 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), BFI);

2162}

2163

2168 if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)

2169 return Op;

2170

2171

2174 uint32_t Selector = 0;

2176 if (I.value() != -1)

2177 Selector |= (I.value() << (I.index() * 4));

2178 }

2179

2180 SDLoc DL(Op);

2182 DAG.getBitcast(MVT::i32, V2), Selector, DL, DAG);

2183 return DAG.getBitcast(Op.getValueType(), PRMT);

2184}

2185

2186

2187

2188

2189

2190SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,

2194

2195 EVT VT = Op.getValueType();

2197 SDLoc dl(Op);

2198 SDValue ShOpLo = Op.getOperand(0);

2199 SDValue ShOpHi = Op.getOperand(1);

2200 SDValue ShAmt = Op.getOperand(2);

2202

2203 if (VTBits == 32 && STI.getSmVersion() >= 35) {

2204

2205

2206

2207

2208

2211 DAG.getNode(NVPTXISD::FSHR_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);

2212

2215 }

2216 else {

2217

2218

2219

2220

2221

2222

2223

2224

2227 ShAmt);

2234

2240

2243 }

2244}

2245

2246

2247

2248

2249

2250

2255

2256 EVT VT = Op.getValueType();

2258 SDLoc dl(Op);

2259 SDValue ShOpLo = Op.getOperand(0);

2260 SDValue ShOpHi = Op.getOperand(1);

2261 SDValue ShAmt = Op.getOperand(2);

2262

2263 if (VTBits == 32 && STI.getSmVersion() >= 35) {

2264

2265

2266

2267

2268

2270 DAG.getNode(NVPTXISD::FSHL_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);

2272

2275 }

2276 else {

2277

2278

2279

2280

2281

2282

2283

2284

2287 ShAmt);

2294

2300

2303 }

2304}

2305

2306

2307

2310 EVT VT = Op.getValueType();

2311 SDLoc DL(Op);

2312

2316

2317 if (!SrcVT.bitsEq(VT))

2319

2320 return DAG.getNode(NVPTXISD::FCOPYSIGN, DL, VT, In1, In2);

2321}

2322

2324 EVT VT = Op.getValueType();

2325

2326 if (VT == MVT::f32)

2327 return LowerFROUND32(Op, DAG);

2328

2329 if (VT == MVT::f64)

2330 return LowerFROUND64(Op, DAG);

2331

2333}

2334

2335

2336

2337

2338

2339

2340

2341

2344 SDLoc SL(Op);

2346 EVT VT = Op.getValueType();

2347

2349

2350

2352 const unsigned SignBitMask = 0x80000000;

2354 DAG.getConstant(SignBitMask, SL, MVT::i32));

2355 const unsigned PointFiveInBits = 0x3F000000;

2356 SDValue PointFiveWithSignRaw =

2358 DAG.getConstant(PointFiveInBits, SL, MVT::i32));

2359 SDValue PointFiveWithSign =

2360 DAG.getNode(ISD::BITCAST, SL, VT, PointFiveWithSignRaw);

2362 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);

2363

2364

2370

2371

2374 SDValue RoundedAForSmallA = DAG.getNode(ISD::FTRUNC, SL, VT, A);

2375 return DAG.getNode(ISD::SELECT, SL, VT, IsSmall, RoundedAForSmallA, RoundedA);

2376}

2377

2378

2379

2380

2381

2382

2385 SDLoc SL(Op);

2387 EVT VT = Op.getValueType();

2388

2390

2391

2394 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);

2395

2396

2402 RoundedA);

2403

2404

2406 DAG.getNode(ISD::FTRUNC, SL, VT, A);

2407

2408

2413}

2414

2416 EVT VT = N->getValueType(0);

2417 EVT NVT = MVT::f32;

2420 }

2424 SDValue Res = DAG.getNode(N->getOpcode(), DL, NVT, Tmp0, Tmp1, N->getFlags());

2426}

2427

2428SDValue NVPTXTargetLowering::PromoteBinOpIfF32FTZ(SDValue Op,

2432 }

2433 return Op;

2434}

2435

2438 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);

2439

2440 if (Op.getValueType() == MVT::bf16) {

2441 SDLoc Loc(Op);

2444 DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),

2446 }

2447

2448

2449 return Op;

2450}

2451

2454 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);

2455

2456 if (Op.getOperand(0).getValueType() == MVT::bf16) {

2457 SDLoc Loc(Op);

2459 Op.getOpcode(), Loc, Op.getValueType(),

2460 DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, Op.getOperand(0)));

2461 }

2462

2463

2464 return Op;

2465}

2466

2469 EVT NarrowVT = Op.getValueType();

2470 SDValue Wide = Op.getOperand(0);

2473 const TargetLowering *TLI = STI.getTargetLowering();

2474 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 70) {

2476 }

2477 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {

2478

2479 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70) {

2481 return Op;

2482 }

2484 SDLoc Loc(Op);

2485

2486

2489 : MVT::f32,

2490 Wide, Loc, DAG);

2492 }

2493 }

2495 }

2496 }

2497

2498

2499 return Op;

2500}

2501

2504 SDValue Narrow = Op.getOperand(0);

2506 EVT WideVT = Op.getValueType();

2509 (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71)) {

2510 SDLoc Loc(Op);

2511 return DAG.getNode(ISD::BF16_TO_FP, Loc, WideVT, Narrow);

2512 }

2514 (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {

2516 : MVT::f32;

2517 SDLoc Loc(Op);

2518 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {

2519 Op = DAG.getNode(ISD::FP_EXTEND, Loc, F32, Narrow);

2520 } else {

2521 Op = DAG.getNode(ISD::BF16_TO_FP, Loc, F32, Narrow);

2522 }

2523 return DAG.getNode(ISD::FP_EXTEND, Loc, WideVT, Op);

2524 }

2525 }

2526

2527

2528 return Op;

2529}

2530

2533 if (Op.getValueType() != MVT::v2i16)

2534 return Op;

2535 EVT EltVT = Op.getValueType().getVectorElementType();

2537 for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {

2540 [&](const SDUse &O) {

2541 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,

2542 O.get(), DAG.getIntPtrConstant(I, DL));

2543 });

2545 }

2548 return V;

2549}

2550

2555

2556

2557 for (size_t I = 0; I < N->getNumOperands(); I++) {

2565 } else

2566 Ops.push_back(Val);

2567 }

2568

2573

2574 return Tcgen05StNode;

2575}

2576

2580 EVT VT = Op.getValueType();

2581

2583 case MVT::i16: {

2588 }

2589 case MVT::i32: {

2591 }

2592 case MVT::v2i16: {

2596 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i16, Swapped);

2597 }

2598 case MVT::i64: {

2603 DL, DAG);

2606 DL, DAG);

2607 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64,

2608 {SwappedHigh, SwappedLow});

2609 }

2610 default:

2612 }

2613}

2614

2616 switch (IID) {

2617 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:

2618 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1;

2619 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:

2620 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2;

2621 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:

2622 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;

2623 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:

2624 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;

2625 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:

2626 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1;

2627 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:

2628 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2;

2629 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:

2630 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;

2631 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:

2632 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;

2633 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:

2634 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;

2635 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:

2636 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;

2637 case Intrinsic::

2638 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:

2639 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;

2640 case Intrinsic::

2641 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:

2642 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;

2643 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:

2644 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1;

2645 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:

2646 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2;

2647 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:

2648 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;

2649 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:

2650 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;

2651 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:

2652 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1;

2653 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:

2654 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2;

2655 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:

2656 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;

2657 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:

2658 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;

2659 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:

2660 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;

2661 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:

2662 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;

2663 case Intrinsic::

2664 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:

2665 return NVPTXISD::

2666 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;

2667 case Intrinsic::

2668 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:

2669 return NVPTXISD::

2670 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;

2671 };

2672 llvm_unreachable("unhandled tcgen05.mma.disable_output_lane intrinsic");

2673}

2674

2679

2681

2682 for (size_t I = 0; I < N->getNumOperands(); I++) {

2683 if (I == 1)

2684 continue;

2692 } else

2693 Ops.push_back(Val);

2694 }

2695

2700

2701 return Tcgen05MMANode;

2702}

2703

2704

2705static std::optional<std::pair<SDValue, SDValue>>

2708 EVT ResVT = N->getValueType(0);

2710 return {};

2711

2713

2714

2716 for (unsigned i = 0; i < NumElts; ++i)

2718

2719 ListVTs.push_back(N->getValueType(1));

2720

2722

2724 N->getOperand(2)};

2725

2726 if (HasOffset) {

2727 Ops.push_back(N->getOperand(3));

2728 Ops.push_back(N->getOperand(4));

2729 } else

2730 Ops.push_back(N->getOperand(3));

2731

2736

2737

2739 for (unsigned i = 0; i < NumElts; ++i) {

2742 }

2743

2746 return {{BuildVector, Chain}};

2747}

2748

2751 SDValue Intrin = N->getOperand(1);

2752

2753

2755 switch (IntrinNo) {

2756 default:

2757 break;

2758 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:

2759 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:

2760 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:

2761 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:

2762 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:

2763 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:

2764 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:

2765 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:

2766 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:

2767 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:

2768 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:

2769 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:

2770 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:

2771 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:

2772 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:

2773 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:

2774 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:

2775 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:

2776 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:

2777 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:

2778 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1:

2779 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:

2780 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:

2781 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:

2782 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:

2783 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:

2784 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:

2785 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:

2786 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:

2787 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:

2788 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:

2789 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:

2790 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:

2791 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:

2792 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:

2793 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:

2794 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:

2796 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:

2797 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:

2798 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:

2799 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:

2800 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:

2801 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:

2802 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:

2803 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:

2804 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:

2805 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:

2806 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:

2807 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:

2808 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:

2809 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:

2810 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:

2811 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:

2812 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:

2813 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:

2814 case Intrinsic::

2815 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:

2816 case Intrinsic::

2817 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:

2818 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:

2819 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:

2820 case Intrinsic::

2821 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:

2822 case Intrinsic::

2823 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:

2825 }

2826 return Op;

2827}

2828

2831

2833 if (N->getOperand(1).getValueType() != MVT::i128) {

2834

2836 }

2837

2838 unsigned IID =

2840 auto Opcode = [&]() {

2841 switch (IID) {

2842 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:

2843 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED;

2844 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:

2845 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X;

2846 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:

2847 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y;

2848 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:

2849 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z;

2850 default:

2852 }

2853 }();

2854

2856 SDValue TryCancelResponse = N->getOperand(1);

2857 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TryCancelResponse);

2858 SDValue TryCancelResponse0 =

2861 SDValue TryCancelResponse1 =

2864

2865 return DAG.getNode(Opcode, DL, N->getVTList(),

2866 {TryCancelResponse0, TryCancelResponse1});

2867}

2868

2872 SDValue F32Vec = N->getOperand(1);

2873 SDValue RBits = N->getOperand(2);

2874

2875 unsigned IntrinsicID = N->getConstantOperandVal(0);

2876

2877

2879 for (unsigned i = 0; i < 4; ++i)

2882

2884

2885 auto [OpCode, RetTy, CvtModeFlag] =

2886 [&]() -> std::tuple<unsigned, MVT::SimpleValueType, uint32_t> {

2887 switch (IntrinsicID) {

2888 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:

2889 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8,

2890 CvtMode::RS | CvtMode::RELU_FLAG};

2891 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:

2892 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};

2893 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:

2894 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8,

2895 CvtMode::RS | CvtMode::RELU_FLAG};

2896 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:

2897 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};

2898 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:

2899 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8,

2900 CvtMode::RS | CvtMode::RELU_FLAG};

2901 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:

2902 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};

2903 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:

2904 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8,

2905 CvtMode::RS | CvtMode::RELU_FLAG};

2906 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:

2907 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};

2908 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:

2909 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16,

2910 CvtMode::RS | CvtMode::RELU_FLAG};

2911 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:

2912 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS};

2913 default:

2915 }

2916 }();

2917

2918 Ops.push_back(RBits);

2920

2922}

2923

2925 const unsigned Mode = [&]() {

2926 switch (Op->getConstantOperandVal(0)) {

2927 case Intrinsic::nvvm_prmt:

2929 case Intrinsic::nvvm_prmt_b4e:

2931 case Intrinsic::nvvm_prmt_ecl:

2933 case Intrinsic::nvvm_prmt_ecr:

2935 case Intrinsic::nvvm_prmt_f4e:

2937 case Intrinsic::nvvm_prmt_rc16:

2939 case Intrinsic::nvvm_prmt_rc8:

2941 default:

2943 }

2944 }();

2947 SDValue B = Op.getNumOperands() == 4 ? Op.getOperand(2)

2949 SDValue Selector = (Op->op_end() - 1)->get();

2951}

2952

2954 switch (Op->getConstantOperandVal(1)) {

2955 default:

2956 return Op;

2957

2958

2959

2960 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:

2961 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:

2962 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:

2966

2967 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:

2968 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG, true))

2971 }

2972}

2973

2975 switch (Op->getConstantOperandVal(0)) {

2976 default:

2977 return Op;

2978 case Intrinsic::nvvm_prmt:

2979 case Intrinsic::nvvm_prmt_b4e:

2980 case Intrinsic::nvvm_prmt_ecl:

2981 case Intrinsic::nvvm_prmt_ecr:

2982 case Intrinsic::nvvm_prmt_f4e:

2983 case Intrinsic::nvvm_prmt_rc16:

2984 case Intrinsic::nvvm_prmt_rc8:

2986 case Intrinsic::nvvm_internal_addrspace_wrap:

2987 return Op.getOperand(1);

2988 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:

2989 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:

2990 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:

2991 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:

2993 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:

2994 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:

2995 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:

2996 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:

2997 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:

2998 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:

2999 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:

3000 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:

3001 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:

3002 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:

3004 }

3005}

3006

3007

3008

3009

3012 assert(V.getValueType() == MVT::i64 &&

3013 "Unexpected CTLZ/CTPOP type to legalize");

3014

3018}

3019

3022 assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);

3023

3025 if (!AmtConst)

3027 const auto Amt = AmtConst->getZExtValue() & 63;

3028

3033

3034

3039

3040

3041

3042

3043

3044

3045

3046

3047

3048

3049

3050

3051

3052

3053 auto [High, Mid, Low] = ((Opcode == ISD::FSHL) == (Amt < 32))

3054 ? std::make_tuple(AHi, ALo, BHi)

3055 : std::make_tuple(ALo, BHi, BLo);

3056

3060

3061 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64, {RLo, RHi});

3062}

3063

3068

3074

3076

3077

3078

3082 EVT Ty = Op.getValueType();

3084

3086 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, Ty, Div, Flags);

3091

3092 if (Flags.hasNoInfs())

3093 return Sub;

3094

3095

3101}

3102

3104 assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");

3105

3107 SDValue TrueVal = Op->getOperand(1);

3108 SDValue FalseVal = Op->getOperand(2);

3110

3111

3114 TrueVal = TrueVal.getOperand(0);

3115 FalseVal = FalseVal.getOperand(0);

3116

3117 EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())

3118 ? TrueVal.getValueType()

3119 : FalseVal.getValueType();

3124 }

3125

3126

3127

3128 TrueVal = DAG.getFreeze(TrueVal);

3129 FalseVal = DAG.getFreeze(FalseVal);

3134 return Or;

3135}

3136

3139

3140 SDValue Chain = N->getOperand(0);

3141 SDValue Val = N->getOperand(1);

3142 SDValue BasePtr = N->getOperand(2);

3144 SDValue Mask = N->getOperand(4);

3145

3149 assert(ValVT.isVector() && "Masked vector store must have vector type");

3151 "Unexpected alignment for masked store");

3152

3153 unsigned Opcode = 0;

3155 default:

3157 case MVT::v4i64:

3158 case MVT::v4f64: {

3160 break;

3161 }

3162 case MVT::v8i32:

3163 case MVT::v8f32: {

3165 break;

3166 }

3167 }

3168

3170

3171

3172 Ops.push_back(Chain);

3173

3174

3175

3176 assert(Mask.getValueType().isVector() &&

3177 Mask.getValueType().getVectorElementType() == MVT::i1 &&

3178 "Mask must be a vector of i1");

3180 "Mask expected to be a BUILD_VECTOR");

3181 assert(Mask.getValueType().getVectorNumElements() ==

3183 "Mask size must be the same as the vector size");

3184 for (auto [I, Op] : enumerate(Mask->ops())) {

3185

3186 if (Op.getNode()->getAsZExtVal() == 0) {

3187

3188

3191 } else {

3192

3196 Ops.push_back(ExtVal);

3197 }

3198 }

3199

3200

3201 Ops.push_back(BasePtr);

3202

3203

3204

3205

3207 "Offset operand expected to be undef");

3209

3213

3214 return NewSt;

3215}

3216

3219 switch (Op.getOpcode()) {

3224 case ISD::ADDRSPACECAST:

3225 return LowerADDRSPACECAST(Op, DAG);

3233 return LowerBUILD_VECTOR(Op, DAG);

3234 case ISD::BITCAST:

3235 return LowerBITCAST(Op, DAG);

3237 return Op;

3239 return LowerEXTRACT_VECTOR_ELT(Op, DAG);

3241 return LowerINSERT_VECTOR_ELT(Op, DAG);

3243 return LowerVECTOR_SHUFFLE(Op, DAG);

3245 return LowerCONCAT_VECTORS(Op, DAG);

3246 case ISD::VECREDUCE_FMAX:

3247 case ISD::VECREDUCE_FMIN:

3248 case ISD::VECREDUCE_FMAXIMUM:

3249 case ISD::VECREDUCE_FMINIMUM:

3250 return LowerVECREDUCE(Op, DAG);

3251 case ISD::STORE:

3252 return LowerSTORE(Op, DAG);

3253 case ISD::MSTORE: {

3254 assert(STI.has256BitVectorLoadStore(

3256 "Masked store vector not supported on subtarget.");

3258 }

3259 case ISD::LOAD:

3260 return LowerLOAD(Op, DAG);

3261 case ISD::MLOAD:

3262 return LowerMLOAD(Op, DAG);

3264 return LowerShiftLeftParts(Op, DAG);

3267 return LowerShiftRightParts(Op, DAG);

3270 case ISD::FROUND:

3271 return LowerFROUND(Op, DAG);

3273 return LowerFCOPYSIGN(Op, DAG);

3276 return LowerINT_TO_FP(Op, DAG);

3279 return LowerFP_TO_INT(Op, DAG);

3281 return LowerFP_ROUND(Op, DAG);

3282 case ISD::FP_EXTEND:

3283 return LowerFP_EXTEND(Op, DAG);

3284 case ISD::BR_JT:

3285 return LowerBR_JT(Op, DAG);

3286 case ISD::VAARG:

3287 return LowerVAARG(Op, DAG);

3288 case ISD::VASTART:

3289 return LowerVASTART(Op, DAG);

3308 case ISD::DYNAMIC_STACKALLOC:

3310 case ISD::STACKRESTORE:

3312 case ISD::STACKSAVE:

3315 return LowerCopyToReg_128(Op, DAG);

3319

3320 return PromoteBinOpIfF32FTZ(Op, DAG);

3328 default:

3329 llvm_unreachable("Custom lowering not defined for operation");

3330 }

3331}

3332

3335 SDValue Chain = Op.getOperand(0);

3337 SDValue Index = Op.getOperand(2);

3338

3339 unsigned JId = JT->getIndex();

3342

3344

3345

3347 Chain = DAG.getNode(NVPTXISD::BrxStart, DL, VTs, Chain, IdV);

3348

3349

3352 Chain = DAG.getNode(NVPTXISD::BrxItem, DL, VTs, Chain.getValue(0),

3354

3355

3358 SDValue BrxEnd = DAG.getNode(NVPTXISD::BrxEnd, DL, MVT::Other, EndOps);

3359

3360 return BrxEnd;

3361}

3362

3363

3367

3371 unsigned SrcAS = N->getSrcAddressSpace();

3372 unsigned DestAS = N->getDestAddressSpace();

3375

3376

3382 const MVT GenerictVT =

3386 SDValue SharedClusterConversion =

3389 return SharedClusterConversion;

3390 }

3391

3392 return DAG.getUNDEF(Op.getValueType());

3393 }

3394

3395 return Op;

3396}

3397

3398

3399

3401 const TargetLowering *TLI = STI.getTargetLowering();

3402 SDLoc DL(Op);

3403

3404 SDNode *Node = Op.getNode();

3406 EVT VT = Node->getValueType(0);

3410 const MaybeAlign MA(Node->getConstantOperandVal(3));

3411

3413 Tmp1, Tmp2, MachinePointerInfo(V));

3414 SDValue VAList = VAListLoad;

3415

3420

3424 }

3425

3426

3430

3431

3433 MachinePointerInfo(V));

3434

3437

3438

3439 return DAG.getLoad(VT, DL, Tmp1, VAList, MachinePointerInfo(SrcV));

3440}

3441

3443 const TargetLowering *TLI = STI.getTargetLowering();

3444 SDLoc DL(Op);

3446

3447

3448 SDValue VAReg = getParamSymbol(DAG, -1, PtrVT);

3449

3451 return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),

3452 MachinePointerInfo(SV));

3453}

3454

3455static std::pair<MemSDNode *, uint32_t>

3457 SDValue Chain = N->getOperand(0);

3458 SDValue BasePtr = N->getOperand(1);

3459 SDValue Mask = N->getOperand(3);

3460 [[maybe_unused]] SDValue Passthru = N->getOperand(4);

3461

3463 EVT ResVT = N->getValueType(0);

3464 assert(ResVT.isVector() && "Masked vector load must have vector type");

3465

3466

3467

3470 "Passthru operand expected to be poison or undef");

3471

3472

3473

3476 assert(ElementSizeInBits % 8 == 0 && "Unexpected element size");

3477 uint32_t ElementSizeInBytes = ElementSizeInBits / 8;

3478 uint32_t ElementMask = (1u << ElementSizeInBytes) - 1u;

3479

3481

3482

3483

3484 UsedBytesMask <<= ElementSizeInBytes;

3485

3486

3487 if (Op->getAsZExtVal() != 0)

3488 UsedBytesMask |= ElementMask;

3489 }

3490

3491 assert(UsedBytesMask != 0 && UsedBytesMask != UINT32_MAX &&

3492 "Unexpected masked load with elements masked all on or all off");

3493

3494

3496 DAG.getLoad(ResVT, DL, Chain, BasePtr, N->getMemOperand()).getNode());

3497

3498 return {NewLD, UsedBytesMask};

3499}

3500

3501

3502static std::optional<std::pair<SDValue, SDValue>>

3505 const EVT ResVT = LD->getValueType(0);

3506 const EVT MemVT = LD->getMemoryVT();

3507

3508

3509

3510 if (ResVT != MemVT)

3511 return std::nullopt;

3512

3513 const auto NumEltsAndEltVT =

3515 if (!NumEltsAndEltVT)

3516 return std::nullopt;

3517 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();

3518

3519 Align Alignment = LD->getAlign();

3522 if (Alignment < PrefAlign) {

3523

3524

3525

3526

3527

3528 return std::nullopt;

3529 }

3530

3531

3532 std::optional<uint32_t> UsedBytesMask = std::nullopt;

3533 if (LD->getOpcode() == ISD::MLOAD)

3535

3536

3537

3538

3539 const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT;

3540

3541 unsigned Opcode;

3542 switch (NumElts) {

3543 default:

3544 return std::nullopt;

3545 case 2:

3547 break;

3548 case 4:

3550 break;

3551 case 8:

3553 break;

3554 }

3556 ListVTs.push_back(MVT::Other);

3558

3560

3561

3563

3565 DAG.getConstant(UsedBytesMask.value_or(UINT32_MAX), DL, MVT::i32));

3566

3567

3568

3571

3573 LD->getMemOperand());

3574

3580

3581

3582 for (const unsigned I : llvm::seq(NumElts)) {

3585 }

3586 } else {

3587 for (const unsigned I : llvm::seq(NumElts)) {

3589 if (LoadEltVT != EltVT)

3592 }

3593 }

3594

3596

3597 const MVT BuildVecVT =

3601

3602 return {{LoadValue, LoadChain}};

3603}

3604

3609 Results.append({Res->first, Res->second});

3610}

3611

3618

3619

3620

3621

3622

3626 assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");

3628 LD->getBasePtr(), LD->getPointerInfo(),

3629 MVT::i8, LD->getAlign(),

3630 LD->getMemOperand()->getFlags());

3632

3633

3634

3635 return DAG.getMergeValues({result, LD->getChain()}, dl);

3636}

3637

3640

3641 if (Op.getValueType() == MVT::i1)

3643

3644

3645

3646

3648 assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&

3649 "Unexpected fpext-load");

3651 LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),

3652 LD->getMemOperand());

3653 }

3654

3656}

3657

3659

3660

3661

3662

3663

3664

3665

3666

3667

3668 EVT VT = Op.getValueType();

3672 MemSDNode *LD = std::get<0>(Result);

3673 uint32_t UsedBytesMask = std::get<1>(Result);

3674

3675 SDLoc DL(LD);

3676

3677

3679

3680 OtherOps.push_back(DAG.getConstant(UsedBytesMask, DL, MVT::i32));

3681

3682

3683

3684 OtherOps.push_back(

3688 LD->getMemoryVT(), LD->getMemOperand());

3689 return NewLD;

3690 }

3692}

3693

3697 SDValue Val = N->getOperand(1);

3700 const EVT MemVT = N->getMemoryVT();

3701

3702

3703

3704 if (ValVT != MemVT)

3706

3707 const auto NumEltsAndEltVT =

3709 if (!NumEltsAndEltVT)

3711 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();

3712

3714

3715 Align Alignment = N->getAlign();

3717 if (Alignment < PrefAlign) {

3718

3719

3720

3721

3722

3724 }

3725

3726 unsigned Opcode;

3727 switch (NumElts) {

3728 default:

3730 case 2:

3732 break;

3733 case 4:

3735 break;

3736 case 8:

3738 break;

3739 }

3740

3742

3743

3744 Ops.push_back(N->getOperand(0));

3745

3746

3751

3752

3754 for (const unsigned I : llvm::seq(NumElts)) {

3757 NumEltsPerSubVector);

3759 }

3760 } else {

3762 for (const unsigned I : llvm::seq(NumElts)) {

3765

3766

3767

3768

3769

3772 Ops.push_back(ExtVal);

3773 }

3774 }

3775

3776

3777 Ops.append(N->op_begin() + 2, N->op_end());

3778

3781 N->getMemoryVT(), N->getMemOperand());

3782

3783

3784 return NewSt;

3785}

3786

3789 EVT VT = Store->getMemoryVT();

3790

3791 if (VT == MVT::i1)

3792 return LowerSTOREi1(Op, DAG);

3793

3794

3795

3797}

3798

3799

3800

3801

3802

3804 SDNode *Node = Op.getNode();

3805 SDLoc dl(Node);

3808 SDValue Tmp2 = ST->getBasePtr();

3810 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");

3813 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,

3814 ST->getAlign(), ST->getMemOperand()->getFlags());

3816}

3817

3820

3821

3822

3823 assert(Op.getOperand(1).getValueType() == MVT::i128 &&

3824 "Custom lowering for 128-bit CopyToReg only");

3825

3826 SDNode *Node = Op.getNode();

3827 SDLoc DL(Node);

3828

3834

3837

3838 NewOps[0] = Op->getOperand(0);

3839 NewOps[1] = Op->getOperand(1);

3840 NewOps[2] = Lo;

3841 NewOps[3] = Hi;

3843 NewOps[4] = Op->getOperand(3);

3844

3846}

3847

3848unsigned NVPTXTargetLowering::getNumRegisters(

3850 std::optional RegisterVT = std::nullopt) const {

3851 if (VT == MVT::i128 && RegisterVT == MVT::i128)

3852 return 1;

3854}

3855

3856bool NVPTXTargetLowering::splitValueIntoRegisterParts(

3858 unsigned NumParts, MVT PartVT, std::optionalCallingConv::ID CC) const {

3859 if (Val.getValueType() == MVT::i128 && NumParts == 1) {

3860 Parts[0] = Val;

3861 return true;

3862 }

3863 return false;

3864}

3865

3866

3867

3868

3869

3871 EVT T) const {

3872 StringRef SavedStr = nvTM->getStrPool().save(

3875}

3876

3878 EVT T) const {

3879 const StringRef SavedStr = nvTM->getStrPool().save("param" + Twine(I));

3881}

3882

3890

3892

3895

3896

3897

3898

3899

3900

3901

3902

3903

3904

3905

3906 auto AllIns = ArrayRef(Ins);

3907 for (const auto &Arg : F.args()) {

3908 const auto ArgIns = AllIns.take_while(

3909 [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });

3910 AllIns = AllIns.drop_front(ArgIns.size());

3911

3912 Type *Ty = Arg.getType();

3913

3914 if (ArgIns.empty())

3916

3917 if (Arg.use_empty()) {

3918

3919 for (const auto &In : ArgIns) {

3920 assert(!In.Used && "Arg.use_empty() is true but Arg is used?");

3922 }

3923 continue;

3924 }

3925

3926 SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);

3927

3928

3929

3930

3931

3932 if (Arg.hasByValAttr()) {

3933

3934

3935

3936

3937

3938

3939

3940 assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");

3941 const auto &ByvalIn = ArgIns[0];

3943 "Ins type did not match function type");

3944 assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");

3945

3948 P = ArgSymbol;

3949 P.getNode()->setIROrder(Arg.getArgNo() + 1);

3950 } else {

3951 P = DAG.getNode(NVPTXISD::MoveParam, dl, ByvalIn.VT, ArgSymbol);

3952 P.getNode()->setIROrder(Arg.getArgNo() + 1);

3955 }

3957 } else {

3961 assert(VTs.size() == ArgIns.size() && "Size mismatch");

3962 assert(VTs.size() == Offsets.size() && "Size mismatch");

3963

3965 &F, Ty, Arg.getArgNo() + AttributeList::FirstArgIndex, DL);

3966

3967 unsigned I = 0;

3969 for (const unsigned NumElts : VI) {

3970

3971 const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];

3973

3976

3979 DAG.getLoad(VecVT, dl, Root, VecAddr,

3983 P.getNode()->setIROrder(Arg.getArgNo() + 1);

3984 for (const unsigned J : llvm::seq(NumElts)) {

3986

3988 DAG, dl);

3990 }

3991 I += NumElts;

3992 }

3993 }

3994 }

3995

3996 if (!OutChains.empty())

3998

3999 return Chain;

4000}

4001

4004 bool isVarArg,

4009 Type *RetTy = F.getReturnType();

4010

4012 assert(OutVals.empty() && Outs.empty() && "Return value expected for void");

4013 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);

4014 }

4015

4018

4021

4022

4023

4024

4025 const bool ExtendIntegerRetVal =

4026 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;

4027

4031 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");

4032

4033 const auto GetRetVal = [&](unsigned I) -> SDValue {

4034 SDValue RetVal = OutVals[I];

4037 "OutVal type should always be legal");

4038

4040 const EVT StoreVT =

4041 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);

4042 return correctParamType(RetVal, StoreVT, Outs[I].Flags, DAG, dl);

4043 };

4044

4045 unsigned I = 0;

4047 for (const unsigned NumElts : VI) {

4048 const MaybeAlign CurrentAlign = ExtendIntegerRetVal

4051

4053 NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });

4054

4057

4058 Chain = DAG.getStore(Chain, dl, Val, Ptr,

4060

4061 I += NumElts;

4062 }

4063

4064 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);

4065}

4066

4070 if (Constraint.size() > 1)

4071 return;

4073}

4074

4075

4076

4077

4078

4079

4085 default:

4086 return false;

4087 case Intrinsic::nvvm_match_all_sync_i32p:

4088 case Intrinsic::nvvm_match_all_sync_i64p:

4090

4091

4092

4093 Info.memVT = MVT::i1;

4094

4095

4097 return true;

4098 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:

4099 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:

4100 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:

4101 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:

4102 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:

4103 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:

4104 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:

4105 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:

4106 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:

4107 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:

4108 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:

4109 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:

4110 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:

4111 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:

4112 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:

4113 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:

4114 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:

4115 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:

4116 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:

4117 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:

4118 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:

4119 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:

4120 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:

4121 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {

4123 Info.memVT = MVT::v8f16;

4124 Info.ptrVal = I.getArgOperand(0);

4125 Info.offset = 0;

4127 Info.align = Align(16);

4128 return true;

4129 }

4130 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:

4131 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:

4132 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:

4133 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:

4134 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:

4135 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:

4136 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:

4137 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:

4138 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:

4139 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:

4140 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:

4141 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:

4142 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:

4143 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:

4144 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:

4145 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:

4146 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:

4147 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:

4148 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:

4149 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:

4150 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:

4151 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:

4152 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:

4153 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {

4155 Info.memVT = MVT::v2i32;

4156 Info.ptrVal = I.getArgOperand(0);

4157 Info.offset = 0;

4159 Info.align = Align(8);

4160 return true;

4161 }

4162

4163 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:

4164 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:

4165 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:

4166 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:

4167 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:

4168 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:

4169 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:

4170 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:

4171 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:

4172 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:

4173 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:

4174 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:

4175 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:

4176 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:

4177 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:

4178 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:

4179

4180 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:

4181 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:

4182 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:

4183 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:

4184 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:

4185 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:

4186 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:

4187 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:

4188 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:

4189 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:

4190 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:

4191 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:

4192 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:

4193 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:

4194 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:

4195 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:

4196 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:

4197 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:

4198 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:

4199 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:

4200 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:

4201 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:

4202 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {

4204 Info.memVT = MVT::v4i32;

4205 Info.ptrVal = I.getArgOperand(0);

4206 Info.offset = 0;

4208 Info.align = Align(16);

4209 return true;

4210 }

4211

4212 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:

4213 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:

4214 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:

4215 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:

4216 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:

4217 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:

4218 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:

4219 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:

4220

4221 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:

4222 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:

4223 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:

4224 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:

4225 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:

4226 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:

4227 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:

4228 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:

4229 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:

4230 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:

4231 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:

4232 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:

4233 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:

4234 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:

4235 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:

4236 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:

4237 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:

4238 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:

4239 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:

4240 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:

4241 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:

4242 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:

4243 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:

4244 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {

4246 Info.memVT = MVT::i32;

4247 Info.ptrVal = I.getArgOperand(0);

4248 Info.offset = 0;

4250 Info.align = Align(4);

4251 return true;

4252 }

4253

4254 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:

4255 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:

4256 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:

4257 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:

4258 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:

4259 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:

4260 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:

4261 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:

4262 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:

4263 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:

4264 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:

4265 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {

4267 Info.memVT = MVT::v4f16;

4268 Info.ptrVal = I.getArgOperand(0);

4269 Info.offset = 0;

4271 Info.align = Align(16);

4272 return true;

4273 }

4274

4275 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:

4276 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:

4277 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:

4278 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:

4279 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:

4280 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:

4281 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:

4282 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:

4283 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:

4284 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:

4285 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:

4286 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:

4287 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:

4288 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:

4289 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:

4290 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {

4292 Info.memVT = MVT::v8f32;

4293 Info.ptrVal = I.getArgOperand(0);

4294 Info.offset = 0;

4296 Info.align = Align(16);

4297 return true;

4298 }

4299

4300 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:

4301 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:

4302 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:

4303 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:

4304

4305 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:

4306 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:

4307 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:

4308 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:

4309

4310 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:

4311 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:

4312 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:

4313 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:

4314 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:

4315 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:

4316 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:

4317 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:

4318 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:

4319 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:

4320 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:

4321 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {

4323 Info.memVT = MVT::v8i32;

4324 Info.ptrVal = I.getArgOperand(0);

4325 Info.offset = 0;

4327 Info.align = Align(16);

4328 return true;

4329 }

4330

4331 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:

4332 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:

4333 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:

4334 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:

4335 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:

4336 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:

4337 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:

4338 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:

4339 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:

4340 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:

4341 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:

4342 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:

4343 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:

4344 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:

4345 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {

4347 Info.memVT = MVT::v2i32;

4348 Info.ptrVal = I.getArgOperand(0);

4349 Info.offset = 0;

4351 Info.align = Align(8);

4352 return true;

4353 }

4354

4355 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:

4356 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:

4357 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:

4358 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:

4359

4360 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:

4361 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:

4362 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:

4363 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {

4365 Info.memVT = MVT::f64;

4366 Info.ptrVal = I.getArgOperand(0);

4367 Info.offset = 0;

4369 Info.align = Align(8);

4370 return true;

4371 }

4372

4373 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:

4374 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:

4375 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:

4376 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {

4378 Info.memVT = MVT::v2f64;

4379 Info.ptrVal = I.getArgOperand(0);

4380 Info.offset = 0;

4382 Info.align = Align(16);

4383 return true;

4384 }

4385

4386 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:

4387 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:

4388 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:

4389 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:

4390 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:

4391 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:

4392 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:

4393 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:

4394 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:

4395 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:

4396 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:

4397 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {

4399 Info.memVT = MVT::v4f16;

4400 Info.ptrVal = I.getArgOperand(0);

4401 Info.offset = 0;

4403 Info.align = Align(16);

4404 return true;

4405 }

4406

4407 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:

4408 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:

4409 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:

4410 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:

4411 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:

4412 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:

4413 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:

4414 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:

4415 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:

4416 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:

4417 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:

4418 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:

4419 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:

4420 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:

4421 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:

4422 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {

4424 Info.memVT = MVT::v8f32;

4425 Info.ptrVal = I.getArgOperand(0);

4426 Info.offset = 0;

4428 Info.align = Align(16);

4429 return true;

4430 }

4431

4432 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:

4433 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:

4434 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:

4435 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:

4436 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:

4437 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:

4438 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:

4439 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:

4440 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:

4441 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:

4442 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:

4443 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {

4445 Info.memVT = MVT::v8i32;

4446 Info.ptrVal = I.getArgOperand(0);

4447 Info.offset = 0;

4449 Info.align = Align(16);

4450 return true;

4451 }

4452

4453 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:

4454 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:

4455 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:

4456 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:

4457 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:

4458 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:

4459 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:

4460 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:

4461 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:

4462 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:

4463 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {

4465 Info.memVT = MVT::v2i32;

4466 Info.ptrVal = I.getArgOperand(0);

4467 Info.offset = 0;

4469 Info.align = Align(8);

4470 return true;

4471 }

4472

4473 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:

4474 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:

4475 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:

4476 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {

4478 Info.memVT = MVT::v2f64;

4479 Info.ptrVal = I.getArgOperand(0);

4480 Info.offset = 0;

4482 Info.align = Align(16);

4483 return true;

4484 }

4485

4486 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:

4487 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:

4488 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {

4490 Info.memVT = MVT::i32;

4491 Info.ptrVal = I.getArgOperand(0);

4492 Info.offset = 0;

4494 Info.align = Align(4);

4495 return true;

4496 }

4497

4498 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:

4499 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:

4500 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {

4502 Info.memVT = MVT::v4i32;

4503 Info.ptrVal = I.getArgOperand(0);

4504 Info.offset = 0;

4506 Info.align = Align(16);

4507 return true;

4508 }

4509

4510 case Intrinsic::nvvm_atomic_add_gen_f_cta:

4511 case Intrinsic::nvvm_atomic_add_gen_f_sys:

4512 case Intrinsic::nvvm_atomic_add_gen_i_cta:

4513 case Intrinsic::nvvm_atomic_add_gen_i_sys:

4514 case Intrinsic::nvvm_atomic_and_gen_i_cta:

4515 case Intrinsic::nvvm_atomic_and_gen_i_sys:

4516 case Intrinsic::nvvm_atomic_cas_gen_i_cta:

4517 case Intrinsic::nvvm_atomic_cas_gen_i_sys:

4518 case Intrinsic::nvvm_atomic_dec_gen_i_cta:

4519 case Intrinsic::nvvm_atomic_dec_gen_i_sys:

4520 case Intrinsic::nvvm_atomic_inc_gen_i_cta:

4521 case Intrinsic::nvvm_atomic_inc_gen_i_sys:

4522 case Intrinsic::nvvm_atomic_max_gen_i_cta:

4523 case Intrinsic::nvvm_atomic_max_gen_i_sys:

4524 case Intrinsic::nvvm_atomic_min_gen_i_cta:

4525 case Intrinsic::nvvm_atomic_min_gen_i_sys:

4526 case Intrinsic::nvvm_atomic_or_gen_i_cta:

4527 case Intrinsic::nvvm_atomic_or_gen_i_sys:

4528 case Intrinsic::nvvm_atomic_exch_gen_i_cta:

4529 case Intrinsic::nvvm_atomic_exch_gen_i_sys:

4530 case Intrinsic::nvvm_atomic_xor_gen_i_cta:

4531 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {

4532 auto &DL = I.getDataLayout();

4535 Info.ptrVal = I.getArgOperand(0);

4536 Info.offset = 0;

4538 Info.align.reset();

4539 return true;

4540 }

4541

4542 case Intrinsic::nvvm_prefetch_tensormap: {

4543 auto &DL = I.getDataLayout();

4546 Info.ptrVal = I.getArgOperand(0);

4547 Info.offset = 0;

4548 Info.flags =

4550 Info.align.reset();

4551 return true;

4552 }

4553

4554 case Intrinsic::nvvm_ldu_global_i:

4555 case Intrinsic::nvvm_ldu_global_f:

4556 case Intrinsic::nvvm_ldu_global_p: {

4558 Info.memVT = getValueType(I.getDataLayout(), I.getType());

4559 Info.ptrVal = I.getArgOperand(0);

4560 Info.offset = 0;

4562 Info.align = cast(I.getArgOperand(1))->getMaybeAlignValue();

4563

4564 return true;

4565 }

4566 case Intrinsic::nvvm_tex_1d_v4f32_s32:

4567 case Intrinsic::nvvm_tex_1d_v4f32_f32:

4568 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:

4569 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:

4570 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:

4571 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:

4572 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:

4573 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:

4574 case Intrinsic::nvvm_tex_2d_v4f32_s32:

4575 case Intrinsic::nvvm_tex_2d_v4f32_f32:

4576 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:

4577 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:

4578 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:

4579 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:

4580 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:

4581 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:

4582 case Intrinsic::nvvm_tex_3d_v4f32_s32:

4583 case Intrinsic::nvvm_tex_3d_v4f32_f32:

4584 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:

4585 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:

4586 case Intrinsic::nvvm_tex_cube_v4f32_f32:

4587 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:

4588 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:

4589 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:

4590 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:

4591 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:

4592 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:

4593 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:

4594 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:

4595 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:

4596 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:

4597 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:

4598 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:

4599 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:

4600 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:

4601 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:

4602 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:

4603 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:

4604 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:

4605 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:

4606 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:

4607 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:

4608 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:

4609 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:

4610 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:

4611 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:

4612 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:

4613 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:

4614 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:

4615 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:

4616 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:

4617 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:

4618 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:

4619 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:

4620 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:

4621 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:

4622 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:

4623 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:

4625 Info.memVT = MVT::v4f32;

4626 Info.ptrVal = nullptr;

4627 Info.offset = 0;

4629 Info.align = Align(16);

4630 return true;

4631

4632 case Intrinsic::nvvm_tex_1d_v4s32_s32:

4633 case Intrinsic::nvvm_tex_1d_v4s32_f32:

4634 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:

4635 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:

4636 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:

4637 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:

4638 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:

4639 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:

4640 case Intrinsic::nvvm_tex_2d_v4s32_s32:

4641 case Intrinsic::nvvm_tex_2d_v4s32_f32:

4642 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:

4643 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:

4644 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:

4645 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:

4646 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:

4647 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:

4648 case Intrinsic::nvvm_tex_3d_v4s32_s32:

4649 case Intrinsic::nvvm_tex_3d_v4s32_f32:

4650 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:

4651 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:

4652 case Intrinsic::nvvm_tex_cube_v4s32_f32:

4653 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:

4654 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:

4655 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:

4656 case Intrinsic::nvvm_tex_cube_v4u32_f32:

4657 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:

4658 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:

4659 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:

4660 case Intrinsic::nvvm_tex_1d_v4u32_s32:

4661 case Intrinsic::nvvm_tex_1d_v4u32_f32:

4662 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:

4663 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:

4664 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:

4665 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:

4666 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:

4667 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:

4668 case Intrinsic::nvvm_tex_2d_v4u32_s32:

4669 case Intrinsic::nvvm_tex_2d_v4u32_f32:

4670 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:

4671 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:

4672 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:

4673 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:

4674 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:

4675 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:

4676 case Intrinsic::nvvm_tex_3d_v4u32_s32:

4677 case Intrinsic::nvvm_tex_3d_v4u32_f32:

4678 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:

4679 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:

4680 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:

4681 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:

4682 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:

4683 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:

4684 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:

4685 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:

4686 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:

4687 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:

4688 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:

4689 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:

4690 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:

4691 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:

4692 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:

4693 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:

4694 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:

4695 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:

4696 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:

4697 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:

4698 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:

4699 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:

4700 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:

4701 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:

4702 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:

4703 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:

4704 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:

4705 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:

4706 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:

4707 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:

4708 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:

4709 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:

4710 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:

4711 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:

4712 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:

4713 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:

4714 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:

4715 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:

4716 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:

4717 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:

4718 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:

4719 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:

4720 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:

4721 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:

4722 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:

4723 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:

4724 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:

4725 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:

4726 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:

4727 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:

4728 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:

4729 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:

4730 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:

4731 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:

4732 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:

4733 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:

4734 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:

4735 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:

4736 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:

4737 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:

4738 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:

4739 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:

4740 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:

4741 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:

4742 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:

4743 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:

4744 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:

4745 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:

4746 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:

4747 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:

4749 Info.memVT = MVT::v4i32;

4750 Info.ptrVal = nullptr;

4751 Info.offset = 0;

4753 Info.align = Align(16);

4754 return true;

4755

4756 case Intrinsic::nvvm_suld_1d_i8_clamp:

4757 case Intrinsic::nvvm_suld_1d_v2i8_clamp:

4758 case Intrinsic::nvvm_suld_1d_v4i8_clamp:

4759 case Intrinsic::nvvm_suld_1d_array_i8_clamp:

4760 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:

4761 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:

4762 case Intrinsic::nvvm_suld_2d_i8_clamp:

4763 case Intrinsic::nvvm_suld_2d_v2i8_clamp:

4764 case Intrinsic::nvvm_suld_2d_v4i8_clamp:

4765 case Intrinsic::nvvm_suld_2d_array_i8_clamp:

4766 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:

4767 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:

4768 case Intrinsic::nvvm_suld_3d_i8_clamp:

4769 case Intrinsic::nvvm_suld_3d_v2i8_clamp:

4770 case Intrinsic::nvvm_suld_3d_v4i8_clamp:

4771 case Intrinsic::nvvm_suld_1d_i8_trap:

4772 case Intrinsic::nvvm_suld_1d_v2i8_trap:

4773 case Intrinsic::nvvm_suld_1d_v4i8_trap:

4774 case Intrinsic::nvvm_suld_1d_array_i8_trap:

4775 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:

4776 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:

4777 case Intrinsic::nvvm_suld_2d_i8_trap:

4778 case Intrinsic::nvvm_suld_2d_v2i8_trap:

4779 case Intrinsic::nvvm_suld_2d_v4i8_trap:

4780 case Intrinsic::nvvm_suld_2d_array_i8_trap:

4781 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:

4782 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:

4783 case Intrinsic::nvvm_suld_3d_i8_trap:

4784 case Intrinsic::nvvm_suld_3d_v2i8_trap:

4785 case Intrinsic::nvvm_suld_3d_v4i8_trap:

4786 case Intrinsic::nvvm_suld_1d_i8_zero:

4787 case Intrinsic::nvvm_suld_1d_v2i8_zero:

4788 case Intrinsic::nvvm_suld_1d_v4i8_zero:

4789 case Intrinsic::nvvm_suld_1d_array_i8_zero:

4790 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:

4791 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:

4792 case Intrinsic::nvvm_suld_2d_i8_zero:

4793 case Intrinsic::nvvm_suld_2d_v2i8_zero:

4794 case Intrinsic::nvvm_suld_2d_v4i8_zero:

4795 case Intrinsic::nvvm_suld_2d_array_i8_zero:

4796 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:

4797 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:

4798 case Intrinsic::nvvm_suld_3d_i8_zero:

4799 case Intrinsic::nvvm_suld_3d_v2i8_zero:

4800 case Intrinsic::nvvm_suld_3d_v4i8_zero:

4802 Info.memVT = MVT::i8;

4803 Info.ptrVal = nullptr;

4804 Info.offset = 0;

4806 Info.align = Align(16);

4807 return true;

4808

4809 case Intrinsic::nvvm_suld_1d_i16_clamp:

4810 case Intrinsic::nvvm_suld_1d_v2i16_clamp:

4811 case Intrinsic::nvvm_suld_1d_v4i16_clamp:

4812 case Intrinsic::nvvm_suld_1d_array_i16_clamp:

4813 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:

4814 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:

4815 case Intrinsic::nvvm_suld_2d_i16_clamp:

4816 case Intrinsic::nvvm_suld_2d_v2i16_clamp:

4817 case Intrinsic::nvvm_suld_2d_v4i16_clamp:

4818 case Intrinsic::nvvm_suld_2d_array_i16_clamp:

4819 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:

4820 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:

4821 case Intrinsic::nvvm_suld_3d_i16_clamp:

4822 case Intrinsic::nvvm_suld_3d_v2i16_clamp:

4823 case Intrinsic::nvvm_suld_3d_v4i16_clamp:

4824 case Intrinsic::nvvm_suld_1d_i16_trap:

4825 case Intrinsic::nvvm_suld_1d_v2i16_trap:

4826 case Intrinsic::nvvm_suld_1d_v4i16_trap:

4827 case Intrinsic::nvvm_suld_1d_array_i16_trap:

4828 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:

4829 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:

4830 case Intrinsic::nvvm_suld_2d_i16_trap:

4831 case Intrinsic::nvvm_suld_2d_v2i16_trap:

4832 case Intrinsic::nvvm_suld_2d_v4i16_trap:

4833 case Intrinsic::nvvm_suld_2d_array_i16_trap:

4834 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:

4835 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:

4836 case Intrinsic::nvvm_suld_3d_i16_trap:

4837 case Intrinsic::nvvm_suld_3d_v2i16_trap:

4838 case Intrinsic::nvvm_suld_3d_v4i16_trap:

4839 case Intrinsic::nvvm_suld_1d_i16_zero:

4840 case Intrinsic::nvvm_suld_1d_v2i16_zero:

4841 case Intrinsic::nvvm_suld_1d_v4i16_zero:

4842 case Intrinsic::nvvm_suld_1d_array_i16_zero:

4843 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:

4844 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:

4845 case Intrinsic::nvvm_suld_2d_i16_zero:

4846 case Intrinsic::nvvm_suld_2d_v2i16_zero:

4847 case Intrinsic::nvvm_suld_2d_v4i16_zero:

4848 case Intrinsic::nvvm_suld_2d_array_i16_zero:

4849 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:

4850 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:

4851 case Intrinsic::nvvm_suld_3d_i16_zero:

4852 case Intrinsic::nvvm_suld_3d_v2i16_zero:

4853 case Intrinsic::nvvm_suld_3d_v4i16_zero:

4855 Info.memVT = MVT::i16;

4856 Info.ptrVal = nullptr;

4857 Info.offset = 0;

4859 Info.align = Align(16);

4860 return true;

4861

4862 case Intrinsic::nvvm_suld_1d_i32_clamp:

4863 case Intrinsic::nvvm_suld_1d_v2i32_clamp:

4864 case Intrinsic::nvvm_suld_1d_v4i32_clamp:

4865 case Intrinsic::nvvm_suld_1d_array_i32_clamp:

4866 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:

4867 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:

4868 case Intrinsic::nvvm_suld_2d_i32_clamp:

4869 case Intrinsic::nvvm_suld_2d_v2i32_clamp:

4870 case Intrinsic::nvvm_suld_2d_v4i32_clamp:

4871 case Intrinsic::nvvm_suld_2d_array_i32_clamp:

4872 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:

4873 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:

4874 case Intrinsic::nvvm_suld_3d_i32_clamp:

4875 case Intrinsic::nvvm_suld_3d_v2i32_clamp:

4876 case Intrinsic::nvvm_suld_3d_v4i32_clamp:

4877 case Intrinsic::nvvm_suld_1d_i32_trap:

4878 case Intrinsic::nvvm_suld_1d_v2i32_trap:

4879 case Intrinsic::nvvm_suld_1d_v4i32_trap:

4880 case Intrinsic::nvvm_suld_1d_array_i32_trap:

4881 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:

4882 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:

4883 case Intrinsic::nvvm_suld_2d_i32_trap:

4884 case Intrinsic::nvvm_suld_2d_v2i32_trap:

4885 case Intrinsic::nvvm_suld_2d_v4i32_trap:

4886 case Intrinsic::nvvm_suld_2d_array_i32_trap:

4887 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:

4888 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:

4889 case Intrinsic::nvvm_suld_3d_i32_trap:

4890 case Intrinsic::nvvm_suld_3d_v2i32_trap:

4891 case Intrinsic::nvvm_suld_3d_v4i32_trap:

4892 case Intrinsic::nvvm_suld_1d_i32_zero:

4893 case Intrinsic::nvvm_suld_1d_v2i32_zero:

4894 case Intrinsic::nvvm_suld_1d_v4i32_zero:

4895 case Intrinsic::nvvm_suld_1d_array_i32_zero:

4896 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:

4897 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:

4898 case Intrinsic::nvvm_suld_2d_i32_zero:

4899 case Intrinsic::nvvm_suld_2d_v2i32_zero:

4900 case Intrinsic::nvvm_suld_2d_v4i32_zero:

4901 case Intrinsic::nvvm_suld_2d_array_i32_zero:

4902 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:

4903 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:

4904 case Intrinsic::nvvm_suld_3d_i32_zero:

4905 case Intrinsic::nvvm_suld_3d_v2i32_zero:

4906 case Intrinsic::nvvm_suld_3d_v4i32_zero:

4908 Info.memVT = MVT::i32;

4909 Info.ptrVal = nullptr;

4910 Info.offset = 0;

4912 Info.align = Align(16);

4913 return true;

4914

4915 case Intrinsic::nvvm_suld_1d_i64_clamp:

4916 case Intrinsic::nvvm_suld_1d_v2i64_clamp:

4917 case Intrinsic::nvvm_suld_1d_array_i64_clamp:

4918 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:

4919 case Intrinsic::nvvm_suld_2d_i64_clamp:

4920 case Intrinsic::nvvm_suld_2d_v2i64_clamp:

4921 case Intrinsic::nvvm_suld_2d_array_i64_clamp:

4922 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:

4923 case Intrinsic::nvvm_suld_3d_i64_clamp:

4924 case Intrinsic::nvvm_suld_3d_v2i64_clamp:

4925 case Intrinsic::nvvm_suld_1d_i64_trap:

4926 case Intrinsic::nvvm_suld_1d_v2i64_trap:

4927 case Intrinsic::nvvm_suld_1d_array_i64_trap:

4928 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:

4929 case Intrinsic::nvvm_suld_2d_i64_trap:

4930 case Intrinsic::nvvm_suld_2d_v2i64_trap:

4931 case Intrinsic::nvvm_suld_2d_array_i64_trap:

4932 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:

4933 case Intrinsic::nvvm_suld_3d_i64_trap:

4934 case Intrinsic::nvvm_suld_3d_v2i64_trap:

4935 case Intrinsic::nvvm_suld_1d_i64_zero:

4936 case Intrinsic::nvvm_suld_1d_v2i64_zero:

4937 case Intrinsic::nvvm_suld_1d_array_i64_zero:

4938 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:

4939 case Intrinsic::nvvm_suld_2d_i64_zero:

4940 case Intrinsic::nvvm_suld_2d_v2i64_zero:

4941 case Intrinsic::nvvm_suld_2d_array_i64_zero:

4942 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:

4943 case Intrinsic::nvvm_suld_3d_i64_zero:

4944 case Intrinsic::nvvm_suld_3d_v2i64_zero:

4946 Info.memVT = MVT::i64;

4947 Info.ptrVal = nullptr;

4948 Info.offset = 0;

4950 Info.align = Align(16);

4951 return true;

4952

4953 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:

4954 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:

4955 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {

4957 Info.memVT = MVT::v1i32;

4958 Info.ptrVal = I.getArgOperand(0);

4959 Info.offset = 0;

4961 Info.align.reset();

4962 return true;

4963 }

4964

4965 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:

4966 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:

4967 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:

4968 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: {

4970 Info.memVT = MVT::v2i32;

4971 Info.ptrVal = I.getArgOperand(0);

4972 Info.offset = 0;

4974 Info.align.reset();

4975 return true;

4976 }

4977

4978 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:

4979 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:

4980 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:

4981 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:

4982 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4: {

4984 Info.memVT = MVT::v4i32;

4985 Info.ptrVal = I.getArgOperand(0);

4986 Info.offset = 0;

4988 Info.align.reset();

4989 return true;

4990 }

4991

4992 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:

4993 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:

4994 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:

4995 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:

4996 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8: {

4998 Info.memVT = MVT::v8i32;

4999 Info.ptrVal = I.getArgOperand(0);

5000 Info.offset = 0;

5002 Info.align.reset();

5003 return true;

5004 }

5005

5006 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:

5007 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:

5008 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:

5009 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:

5010 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16: {

5012 Info.memVT = MVT::v16i32;

5013 Info.ptrVal = I.getArgOperand(0);

5014 Info.offset = 0;

5016 Info.align.reset();

5017 return true;

5018 }

5019

5020 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:

5021 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:

5022 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:

5023 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:

5024 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32: {

5026 Info.memVT = MVT::v32i32;

5027 Info.ptrVal = I.getArgOperand(0);

5028 Info.offset = 0;

5030 Info.align.reset();

5031 return true;

5032 }

5033

5034 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:

5035 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:

5036 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:

5037 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:

5038 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64: {

5040 Info.memVT = MVT::v64i32;

5041 Info.ptrVal = I.getArgOperand(0);

5042 Info.offset = 0;

5044 Info.align.reset();

5045 return true;

5046 }

5047

5048 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:

5049 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:

5050 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:

5051 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:

5052 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128: {

5054 Info.memVT = MVT::v128i32;

5055 Info.ptrVal = I.getArgOperand(0);

5056 Info.offset = 0;

5058 Info.align.reset();

5059 return true;

5060 }

5061

5062 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:

5063 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:

5064 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {

5066 Info.memVT = MVT::i32;

5067 Info.ptrVal = I.getArgOperand(0);

5068 Info.offset = 0;

5070 Info.align.reset();

5071 return true;

5072 }

5073

5074 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:

5075 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:

5076 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:

5077 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {

5079 Info.memVT = MVT::v2i32;

5080 Info.ptrVal = I.getArgOperand(0);

5081 Info.offset = 0;

5083 Info.align.reset();

5084 return true;

5085 }

5086

5087 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:

5088 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:

5089 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:

5090 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:

5091 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {

5093 Info.memVT = MVT::v4i32;

5094 Info.ptrVal = I.getArgOperand(0);

5095 Info.offset = 0;

5097 Info.align.reset();

5098 return true;

5099 }

5100

5101 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:

5102 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:

5103 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:

5104 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:

5105 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {

5107 Info.memVT = MVT::v8i32;

5108 Info.ptrVal = I.getArgOperand(0);

5109 Info.offset = 0;

5111 Info.align.reset();

5112 return true;

5113 }

5114

5115 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:

5116 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:

5117 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:

5118 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:

5119 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {

5121 Info.memVT = MVT::v16i32;

5122 Info.ptrVal = I.getArgOperand(0);

5123 Info.offset = 0;

5125 Info.align.reset();

5126 return true;

5127 }

5128

5129 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:

5130 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:

5131 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:

5132 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:

5133 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {

5135 Info.memVT = MVT::v32i32;

5136 Info.ptrVal = I.getArgOperand(0);

5137 Info.offset = 0;

5139 Info.align.reset();

5140 return true;

5141 }

5142

5143 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:

5144 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:

5145 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:

5146 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:

5147 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {

5149 Info.memVT = MVT::v64i32;

5150 Info.ptrVal = I.getArgOperand(0);

5151 Info.offset = 0;

5153 Info.align.reset();

5154 return true;

5155 }

5156

5157 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:

5158 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:

5159 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:

5160 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:

5161 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {

5163 Info.memVT = MVT::v128i32;

5164 Info.ptrVal = I.getArgOperand(0);

5165 Info.offset = 0;

5167 Info.align.reset();

5168 return true;

5169 }

5170 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:

5171 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:

5172 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:

5173 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:

5174 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:

5175 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:

5176 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:

5177 case Intrinsic::

5178 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:

5179 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:

5180 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:

5181 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:

5182 case Intrinsic::

5183 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: {

5184

5186 Info.memVT = MVT::v4i32;

5187 Info.ptrVal = I.getArgOperand(0);

5188 Info.offset = 0;

5190 Info.align = Align(16);

5191 return true;

5192 }

5193

5194 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:

5195 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:

5196 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:

5197 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:

5198 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:

5199 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:

5200 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:

5201 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:

5202 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:

5203 case Intrinsic::

5204 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:

5205 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:

5206 case Intrinsic::

5207 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: {

5208

5210 Info.memVT = MVT::v8i32;

5211 Info.ptrVal = I.getArgOperand(0);

5212 Info.offset = 0;

5214 Info.align = Align(16);

5215 return true;

5216 }

5217 }

5218 return false;

5219}

5220

5221

5222

5223

5224

5225

5226

5227

5230

5231

5232 const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));

5233

5234

5235

5236

5237 if (F || F->hasLocalLinkage() ||

5238 F->hasAddressTaken(nullptr,

5239 false,

5240 true,

5241 true))

5242 return ABITypeAlign;

5243

5245 return std::max(Align(16), ABITypeAlign);

5246}

5247

5248

5252 Align ArgAlign = InitialAlign;

5253

5254 if (F)

5256

5257

5258

5259

5260

5261

5262

5263

5264

5265

5267 ArgAlign = std::max(ArgAlign, Align(4));

5268

5269 return ArgAlign;

5270}

5271

5272

5273

5274

5276 int Idx) const {

5277 std::string ParamName;

5279

5281 if (Idx < 0)

5282 ParamStr << "_vararg";

5283 else

5284 ParamStr << "_param_" << Idx;

5285

5286 return ParamName;

5287}

5288

5289

5290

5291

5292

5293

5297

5298

5299

5300

5301

5302

5303

5304

5305

5306

5308 return false;

5309

5312

5313 switch (AM.Scale) {

5314 case 0:

5315 break;

5316 case 1:

5317 if (AM.HasBaseReg)

5318 return false;

5319

5320 break;

5321 default:

5322

5323 return false;

5324 }

5325 return true;

5326}

5327

5328

5329

5330

5331

5332

5333

5336 if (Constraint.size() == 1) {

5337 switch (Constraint[0]) {

5338 default:

5339 break;

5340 case 'b':

5341 case 'r':

5342 case 'h':

5343 case 'c':

5344 case 'l':

5345 case 'f':

5346 case 'd':

5347 case 'q':

5348 case '0':

5349 case 'N':

5351 }

5352 }

5354}

5355

5356std::pair<unsigned, const TargetRegisterClass *>

5359 MVT VT) const {

5360 if (Constraint.size() == 1) {

5361 switch (Constraint[0]) {

5362 case 'b':

5363 return std::make_pair(0U, &NVPTX::B1RegClass);

5364 case 'c':

5365 case 'h':

5366 return std::make_pair(0U, &NVPTX::B16RegClass);

5367 case 'r':

5368 case 'f':

5369 return std::make_pair(0U, &NVPTX::B32RegClass);

5370 case 'l':

5371 case 'N':

5372 case 'd':

5373 return std::make_pair(0U, &NVPTX::B64RegClass);

5374 case 'q': {

5375 if (STI.getSmVersion() < 70)

5377 "supported for sm_70 and higher!");

5378 return std::make_pair(0U, &NVPTX::B128RegClass);

5379 }

5380 }

5381 }

5383}

5384

5385

5386

5387

5388

5391

5394

5395

5397 return false;

5398

5399

5401 return true;

5402

5403 return false;

5404}

5405

5408 return Const && Const->getZExtValue() == 0;

5409}

5410

5411

5412

5413

5414

5419

5420

5421

5422

5423

5426

5427

5428

5429

5431 unsigned ZeroOpNum;

5433 ZeroOpNum = 1;

5435 ZeroOpNum = 2;

5436 else

5438

5440 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())

5442

5448 ((ZeroOpNum == 1) ? N1 : MAD),

5449 ((ZeroOpNum == 1) ? MAD : N1));

5450 }

5451

5453}

5454

5464 (N->getFlags().hasAllowContract() &&

5467

5468

5469

5470

5471

5472

5473

5474

5475

5476 int numUses = 0;

5477 int nonAddCount = 0;

5479 numUses++;

5481 ++nonAddCount;

5482 if (numUses >= 5)

5484 }

5485 if (nonAddCount) {

5486 int orderNo = N->getIROrder();

5488

5489

5490

5491

5492 if (orderNo - orderNo2 < 500)

5494

5495

5496

5497

5498 bool opIsLive = false;

5501

5503 opIsLive = true;

5504

5505 if (!opIsLive)

5507 int orderNo3 = User->getIROrder();

5508 if (orderNo3 > orderNo) {

5509 opIsLive = true;

5510 break;

5511 }

5512 }

5513

5514 if (!opIsLive)

5516 int orderNo3 = User->getIROrder();

5517 if (orderNo3 > orderNo) {

5518 opIsLive = true;

5519 break;

5520 }

5521 }

5522

5523 if (!opIsLive)

5525 }

5526

5529 }

5530

5532}

5533

5534

5535

5536

5537

5538

5539

5540

5541

5542

5543

5544

5545

5548

5551

5552 EVT ElementVT = N->getValueType(0);

5553

5556

5557

5558

5560

5561 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)

5562 return true;

5563 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

5564 if (N->getOpcode() != ISD::LOAD)

5565 return true;

5566

5567

5568

5569

5570

5571

5572

5573

5574

5575

5576

5577

5578

5579

5580

5581 return !U.getUser()->use_empty();

5582 }

5583

5584

5585 return false;

5586 }))

5588

5591

5592

5593 unsigned Opcode;

5595 unsigned OldNumOutputs;

5596 switch (LD->getOpcode()) {

5597 case ISD::LOAD:

5598 OldNumOutputs = 1;

5599

5600

5601

5603

5604

5605 Operands.push_back(DCI.DAG.getConstant(UINT32_MAX, DL, MVT::i32));

5606 Operands.push_back(DCI.DAG.getIntPtrConstant(

5608 break;

5610 OldNumOutputs = 2;

5612 break;

5614

5615

5616 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)

5618 OldNumOutputs = 4;

5620 break;

5622

5624 }

5625

5626

5627 const unsigned NewNumOutputs = OldNumOutputs * 2;

5628 SmallVector NewVTs(NewNumOutputs, ElementVT.getVectorElementType());

5629

5630 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());

5631

5632

5633 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(

5634 Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),

5635 LD->getMemOperand());

5636

5637

5638

5639

5641 for (unsigned I : seq(OldNumOutputs))

5642 Results.push_back(DCI.DAG.getBuildVector(

5643 ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));

5644

5645 for (unsigned I : seq(NewLoad->getNumValues() - NewNumOutputs))

5647

5648 return DCI.DAG.getMergeValues(Results, DL);

5649}

5650

5651

5652

5653

5654

5655

5656

5657

5658

5659

5660

5663 unsigned Front, unsigned Back) {

5664

5665

5668

5669

5670 EVT ElementVT = N->getOperand(Front).getValueType();

5671

5672

5675

5677

5678

5679 unsigned Opcode;

5680 switch (N->getOpcode()) {

5681 case ISD::STORE:

5682

5683

5684

5686 break;

5689 break;

5691

5692

5693 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)

5696 break;

5698

5700 default:

5702 }

5703

5704

5705

5707 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {

5710

5711

5712

5713 if (!BV.hasOneUse())

5715

5716

5717

5719

5720 if (Op.getOpcode() == ISD::BITCAST)

5721 Op = Op.getOperand(0);

5722

5723

5724 if (Op.getValueType() == MVT::i16 && Op.getOpcode() == ISD::TRUNCATE &&

5725 Op->getOperand(0).getValueType() == MVT::i32)

5727

5728

5731 }

5732 Operands.append({BV.getOperand(0), BV.getOperand(1)});

5733 }

5734 Operands.append(N->op_end() - Back, N->op_end());

5735

5736

5738 ST->getMemoryVT(), ST->getMemOperand());

5739}

5740

5743

5745

5746

5747

5749 if (!ST->getValue().getValueType().isSimple())

5751 }

5752

5754}

5755

5759

5760

5761

5762 if (N->getValueType(0).isSimple())

5764 }

5765

5767}

5768

5769

5770

5776

5777 SDValue N0 = N->getOperand(0);

5778 SDValue N1 = N->getOperand(1);

5779

5780

5782 if (VT.isVector() || VT != MVT::i32)

5784

5785

5787 return Result;

5788

5789

5791}

5792

5793

5794

5798 SDValue N0 = N->getOperand(0);

5799 SDValue N1 = N->getOperand(1);

5800

5802 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))

5804

5805

5807 return Result;

5808

5809

5811}

5812

5813

5815 switch (MinMax2Opcode) {

5816 case ISD::FMAXNUM:

5817 case ISD::FMAXIMUMNUM:

5818 return NVPTXISD::FMAXNUM3;

5819 case ISD::FMINNUM:

5820 case ISD::FMINIMUMNUM:

5821 return NVPTXISD::FMINNUM3;

5822 case ISD::FMAXIMUM:

5823 return NVPTXISD::FMAXIMUM3;

5824 case ISD::FMINIMUM:

5825 return NVPTXISD::FMINIMUM3;

5826 default:

5828 }

5829}

5830

5831

5832

5835 unsigned PTXVersion, unsigned SmVersion) {

5836

5837

5838 EVT VT = N->getValueType(0);

5839 if (VT != MVT::f32 || PTXVersion < 88 || SmVersion < 100)

5841

5842 SDValue Op0 = N->getOperand(0);

5843 SDValue Op1 = N->getOperand(1);

5844 unsigned MinMaxOp2 = N->getOpcode();

5846

5848

5854

5859 }

5861}

5862

5867

5868

5871

5874 EVT VT = N->getValueType(0);

5875 bool IsSigned = N->getOpcode() == ISD::SREM;

5877

5878 const SDValue &Num = N->getOperand(0);

5879 const SDValue &Den = N->getOperand(1);

5880

5882 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&

5884

5887 DAG.getNode(DivOpc, DL, VT, Num, Den),

5888 Den));

5889 }

5890 }

5892}

5893

5894

5899

5901 if (Op.hasOneUse())

5903 EVT ToVT = N->getValueType(0);

5904 EVT FromVT = Op.getValueType();

5905 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||

5906 (ToVT == MVT::i64 && FromVT == MVT::i32)))

5911

5913 unsigned ExtOpcode = N->getOpcode();

5914 unsigned Opcode = 0;

5915 if (ExtOpcode == ISD::SIGN_EXTEND && Op->getFlags().hasNoSignedWrap())

5916 Opcode = NVPTXISD::MUL_WIDE_SIGNED;

5917 else if (ExtOpcode == ISD::ZERO_EXTEND && Op->getFlags().hasNoUnsignedWrap())

5918 Opcode = NVPTXISD::MUL_WIDE_UNSIGNED;

5919 else

5923 const auto ShiftAmt = Op.getConstantOperandVal(1);

5926 }

5927 return DCI.DAG.getNode(Opcode, DL, ToVT, Op.getOperand(0), RHS);

5928}

5929

5935

5936

5937

5938

5940 unsigned OptSize,

5943

5946 EVT OrigVT = Op.getOperand(0).getValueType();

5949 return true;

5950 }

5952 EVT OrigVT = Op.getOperand(0).getValueType();

5955 return true;

5956 }

5957 }

5958

5959 return false;

5960}

5961

5962

5963

5964

5965

5967 unsigned OptSize,

5968 bool &IsSigned) {

5970

5971

5973 return false;

5974

5975

5977 return false;

5978

5979 IsSigned = (LHSSign == Signed);

5980

5981

5983 const APInt &Val = CI->getAPIntValue();

5985 return Val.isIntN(OptSize);

5986 } else {

5988 }

5989 } else {

5992 return false;

5993

5994 return LHSSign == RHSSign;

5995 }

5996}

5997

5998

5999

6000

6001

6004 EVT MulType = N->getValueType(0);

6005 if (MulType != MVT::i32 && MulType != MVT::i64) {

6007 }

6008

6010 unsigned OptSize = MulType.getSizeInBits() >> 1;

6013

6014

6015 if (N->getOpcode() == ISD::MUL) {

6018 }

6019 }

6020

6021

6022 if (N->getOpcode() == ISD::SHL) {

6024 if (!ShlRHS) {

6026 }

6027

6033 } else {

6035 }

6036 }

6037

6039

6042 }

6043

6044 EVT DemotedVT;

6045 if (MulType == MVT::i32) {

6046 DemotedVT = MVT::i16;

6047 } else {

6048 DemotedVT = MVT::i32;

6049 }

6050

6051

6052

6057

6058 unsigned Opc;

6060 Opc = NVPTXISD::MUL_WIDE_SIGNED;

6061 } else {

6062 Opc = NVPTXISD::MUL_WIDE_UNSIGNED;

6063 }

6064

6065 return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);

6066}

6067

6070 return Const && Const->getZExtValue() == 1;

6071}

6072

6076

6078 return Add->getOperand(1);

6079

6081 return Add->getOperand(0);

6082

6084}

6085

6096

6102

6104

6105 unsigned ConstOpNo;

6107 ConstOpNo = 1;

6109 ConstOpNo = 2;

6110 else

6112

6113 SDValue Y = Select->getOperand((ConstOpNo == 1) ? 2 : 1);

6114

6115

6118

6120

6122 (ConstOpNo == 1) ? X : NewMul,

6123 (ConstOpNo == 1) ? NewMul : X);

6124}

6125

6129

6133

6134 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)

6136

6138

6139

6141 return Res;

6143 return Res;

6144

6145

6147 return Res;

6149 return Res;

6150

6152}

6153

6154

6160

6162 return Ret;

6163

6164 SDValue N0 = N->getOperand(0);

6165 SDValue N1 = N->getOperand(1);

6167}

6168

6169

6174

6176 return Ret;

6177 }

6178

6180}

6181

6184 unsigned int SmVersion) {

6185 EVT CCType = N->getValueType(0);

6188

6189 EVT AType = A.getValueType();

6190 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))

6192

6193 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)

6195

6197

6198

6199

6200

6204 DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});

6207}

6208

6215 EVT VectorVT = Vector.getValueType();

6216 if (Vector->getOpcode() == ISD::LOAD && VectorVT.isSimple() &&

6218 return SDValue();

6219

6220

6221

6225

6226

6229

6231

6232 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))

6234

6236

6237 if (!Index || Index->getZExtValue() == 0)

6239

6244

6249 DCI.DAG.getConstant(Index->getZExtValue() * EltBits, DL, IVT)));

6250

6251

6252 if (EltVT != EltIVT)

6253 Result = DCI.DAG.getNode(ISD::BITCAST, DL, EltVT, Result);

6254

6255 if (EltVT != N->getValueType(0))

6257

6258 return Result;

6259}

6260

6263 SDValue VA = N->getOperand(1);

6265 if (VectorVT != MVT::v4i8)

6267

6268

6269

6270

6271

6274 SDValue VCond = N->getOperand(0);

6275 SDValue VB = N->getOperand(2);

6276 for (int I = 0; I < 4; ++I) {

6282 DL, MVT::i32);

6286 DL, MVT::i32);

6289 }

6291}

6292

6295 auto VT = N->getValueType(0);

6297

6301

6302 auto Op0 = N->getOperand(0);

6303 auto Op1 = N->getOperand(1);

6304

6305

6306

6309

6310 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},

6311 {&Op1, &Op1Bytes}};

6312

6313

6314

6315

6316 for (auto &[Op, OpBytes] : OpData) {

6317

6318 if (Op->getOpcode() == ISD::BITCAST)

6319 *Op = Op->getOperand(0);

6320

6321 if (!(Op->getValueType() == MVT::i16 && Op->getOpcode() == ISD::TRUNCATE &&

6322 Op->getOperand(0).getValueType() == MVT::i32))

6324

6325

6326

6327 if (Op->hasOneUse())

6329

6330 *Op = Op->getOperand(0);

6331

6332

6333

6336

6337

6338 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&

6339 "PRMT selector values out of range");

6340 *OpBytes += 0x22;

6341 *Op = Op->getOperand(0);

6342 }

6343 }

6344 }

6345

6347 auto &DAG = DCI.DAG;

6348

6349 auto PRMT =

6351 (Op1Bytes << 8) | Op0Bytes, DL, DAG);

6353}

6354

6358

6360 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());

6361

6362

6363 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())

6364 return ASCN2->getOperand(0);

6365 }

6366

6368}

6369

6370

6371

6372

6373

6375 assert(Selector.getBitWidth() == 32 && "PRMT must have i32 operands");

6376

6378 return Selector;

6379

6381

6382 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2,

6383 unsigned S3) {

6384 return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));

6385 };

6386

6387 switch (Mode) {

6389 return GetSelector(V, V + 1, V + 2, V + 3);

6391 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);

6393 return GetSelector(V, V, V, V);

6395 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);

6397 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);

6399 unsigned V1 = (V & 1) << 1;

6400 return GetSelector(V1, V1 + 1, V1, V1 + 1);

6401 }

6402 default:

6404 }

6405}

6406

6408 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&

6409 Selector.getBitWidth() == 32 && "PRMT must have i32 operands");

6410

6411 APInt BitField = B.concat(A);

6413 APInt Result(32, 0);

6418 APInt Byte = BitField.extractBits(8, Idx * 8);

6419 if (Sign)

6420 Byte = Byte.ashr(8);

6421 Result.insertBits(Byte, I * 8);

6422 }

6423 return Result;

6424}

6425

6430

6431

6436 N->getConstantOperandAPInt(1),

6437 N->getConstantOperandAPInt(2),

6438 N->getConstantOperandVal(3)),

6439 SDLoc(N), N->getValueType(0));

6441}

6442

6443

6444

6445

6446

6447

6448

6449

6450

6451

6454 switch (R.getOpcode()) {

6459 case ISD::BITCAST: {

6461 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), V);

6463 }

6470 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), A, B);

6472 }

6474 return R;

6475 case ISD::LOAD:

6478 return DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(R), R.getValueType(),

6479 {Chain, R});

6480 }

6484

6486 for (auto &Op : R->ops()) {

6488 if (!V)

6490 Ops.push_back(V);

6491 }

6493 }

6497

6500 R.getValueType(), V, R.getOperand(1));

6502 }

6503 default:

6505 }

6506}

6507

6510

6511 SDValue Chain = N->getOperand(0);

6513

6514

6515

6516 if (Reg.getOpcode() != ISD::LOAD) {

6518 return V;

6519 }

6520

6522}

6523

6524SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,

6525 DAGCombinerInfo &DCI) const {

6527 switch (N->getOpcode()) {

6528 default:

6529 break;

6532 case ISD::ADDRSPACECAST:

6543 case ISD::FMAXNUM:

6544 case ISD::FMINNUM:

6545 case ISD::FMAXIMUM:

6546 case ISD::FMINIMUM:

6547 case ISD::FMAXIMUMNUM:

6548 case ISD::FMINIMUMNUM:

6550 STI.getSmVersion());

6551 case ISD::LOAD:

6557 case NVPTXISD::PRMT:

6559 case NVPTXISD::ProxyReg:

6568 case ISD::STORE:

6574 }

6576}

6577

6580

6581

6583 EVT ToVT = Op->getValueType(0);

6584 if (ToVT != MVT::v2i8) {

6585 return;

6586 }

6587

6588

6598}

6599

6602 SDValue Chain = N->getOperand(0);

6603 SDValue Intrin = N->getOperand(1);

6605

6606

6608 switch (IntrinNo) {

6609 default:

6610 return;

6611 case Intrinsic::nvvm_ldu_global_i:

6612 case Intrinsic::nvvm_ldu_global_f:

6613 case Intrinsic::nvvm_ldu_global_p: {

6614 EVT ResVT = N->getValueType(0);

6615

6617

6618

6621

6622

6623

6624

6625

6626 bool NeedTrunc = false;

6628 EltVT = MVT::i16;

6629 NeedTrunc = true;

6630 }

6631

6632 unsigned Opcode = 0;

6634

6635 switch (NumElts) {

6636 default:

6637 return;

6638 case 2:

6640 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);

6641 break;

6642 case 4: {

6644 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };

6645 LdResVTs = DAG.getVTList(ListVTs);

6646 break;

6647 }

6648 }

6649

6651

6652

6653

6654 OtherOps.push_back(Chain);

6655

6656

6657 OtherOps.append(N->op_begin() + 2, N->op_end());

6658

6660

6664

6666

6667 for (unsigned i = 0; i < NumElts; ++i) {

6669 if (NeedTrunc)

6670 Res =

6673 }

6674

6676

6679

6680 Results.push_back(BuildVec);

6681 Results.push_back(LoadChain);

6682 } else {

6683

6685 "Custom handling of non-i8 ldu/ldg?");

6686

6687

6689

6690

6692

6694

6695

6696

6700

6704 }

6705 return;

6706 }

6707

6708 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:

6709 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:

6710 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:

6711 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:

6712 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:

6713 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:

6714 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:

6715 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:

6716 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:

6717 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:

6718 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:

6719 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:

6720 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:

6721 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:

6722 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:

6723 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:

6724 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:

6725 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:

6726 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:

6727 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:

6728 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:

6729 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:

6730 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:

6731 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:

6733 Results.push_back(Res->first);

6734 Results.push_back(Res->second);

6735 }

6736 return;

6737

6738 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:

6739 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:

6740 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:

6741 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:

6742 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:

6743 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:

6744 if (auto Res = lowerTcgen05Ld(N, DAG, true)) {

6745 Results.push_back(Res->first);

6746 Results.push_back(Res->second);

6747 }

6748 return;

6749 }

6750}

6751

6754

6755

6757 SDValue Chain = N->getOperand(0);

6759 SDValue Glue = N->getOperand(2);

6760

6761 assert(Reg.getValueType() == MVT::i128 &&

6762 "Custom lowering for CopyFromReg with 128-bit reg only");

6764 N->getValueType(2)};

6766

6770

6771 Results.push_back(Pair);

6774}

6775

6779 SDValue Chain = N->getOperand(0);

6781

6783

6786 DAG.getNode(NVPTXISD::ProxyReg, SDLoc(N), VT, {Chain, NewReg});

6788

6790}

6791

6795 assert(N->getValueType(0) == MVT::i128 &&

6796 "Custom lowering for atomic128 only supports i128");

6797

6800

6804 "Support for b128 atomics introduced in PTX ISA version 8.3 and "

6805 "requires target sm_90.",

6807

6810 return;

6811 }

6812

6816 for (const auto &Op : AN->ops().drop_front(2)) {

6817

6820

6823 }

6824 unsigned Opcode = N->getOpcode() == ISD::ATOMIC_SWAP

6831 {Result.getValue(0), Result.getValue(1)}));

6832 Results.push_back(Result.getValue(2));

6833}

6834

6835void NVPTXTargetLowering::ReplaceNodeResults(

6837 switch (N->getOpcode()) {

6838 default:

6840 case ISD::BITCAST:

6842 return;

6843 case ISD::LOAD:

6844 case ISD::MLOAD:

6846 return;

6849 return;

6852 return;

6853 case NVPTXISD::ProxyReg:

6855 return;

6856 case ISD::ATOMIC_CMP_SWAP:

6857 case ISD::ATOMIC_SWAP:

6859 return;

6860 }

6861}

6862

6866

6869 if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&

6870 STI.getPTXVersion() >= 63)

6872 if (Ty->isBFloatTy() && STI.getSmVersion() >= 90 &&

6873 STI.getPTXVersion() >= 78)

6875 if (Ty->isFloatTy())

6877 if (Ty->isDoubleTy() && STI.hasAtomAddF64())

6879 }

6881 }

6882

6883 assert(Ty->isIntegerTy() && "Ty should be integer at this point");

6885

6887 default:

6892 [[fallthrough]];

6897 case 8:

6898 case 16:

6900 case 32:

6902 case 64:

6903 if (STI.hasAtomBitwise64())

6906 case 128:

6908 default:

6910 }

6918 case 8:

6919 case 16:

6921 case 32:

6923 case 64:

6924 if (STI.hasAtomMinMax64())

6927 case 128:

6929 default:

6931 }

6935 case 32:

6937 case 8:

6938 case 16:

6939 case 64:

6940 case 128:

6942 default:

6944 }

6945 }

6946

6948}

6949

6953

6954

6955

6956

6957

6958

6959

6960

6961 return CI &&

6962 (cast(CI->getCompareOperand()->getType())->getBitWidth() <

6963 STI.getMinCmpXchgSizeInBits() ||

6965}

6966

6970 bool BitwidthSupportedAndIsSeqCst =

6972 cast(CI->getCompareOperand()->getType())->getBitWidth() >=

6973 STI.getMinCmpXchgSizeInBits();

6976}

6977

6983

6984

6985

6989 ? Ord

6991 SSID);

6992

6993 return nullptr;

6994}

6995

6999

7002

7004 auto CASWidth =

7005 cast(CI->getCompareOperand()->getType())->getBitWidth();

7007

7010 CASWidth < STI.getMinCmpXchgSizeInBits()))

7012

7013 return nullptr;

7014}

7015

7016

7017

7018

7019

7021 EVT ToVT) const {

7023 return Op;

7024 switch (Op) {

7028 break;

7032 break;

7033 case ISD::VP_FP_TO_UINT:

7035 return ISD::VP_FP_TO_SINT;

7036 break;

7037 default:

7038 break;

7039 }

7040 return Op;

7041}

7042

7043

7045

7050

7056 unsigned Mode = Op.getConstantOperandVal(3);

7057

7058 if (!Selector)

7059 return;

7060

7063

7064

7066 "PRMT must have i32 operands");

7067 assert(Known.getBitWidth() == 32 && "PRMT must have i32 result");

7069

7075 KnownBits Byte = BitField.extractBits(8, Idx * 8);

7076 if (Sign)

7079 }

7080}

7081

7084

7085

7086 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);

7088 return;

7089

7090

7091 auto DestVT = LD->getValueType(0);

7092 if (DestVT.isVector())

7093 return;

7094

7098}

7099

7104

7105 switch (Op.getOpcode()) {

7106 case NVPTXISD::PRMT:

7108 break;

7113 break;

7114 default:

7115 break;

7116 }

7117}

7118

7123

7125 if (DemandedBits.extractBits(8, I * 8).isZero())

7126 continue;

7127

7131

7132 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;

7133 unsigned ByteStart = (Idx % 4) * 8;

7134 if (Sign)

7135 Src.setBit(ByteStart + 7);

7136 else

7137 Src.setBits(ByteStart, ByteStart + 8);

7138 }

7139

7140 return {DemandedLHS, DemandedRHS};

7141}

7142

7143

7144

7146 if (Op)

7148 if (Op.isUndef())

7150 return Op;

7151}

7152

7157 unsigned Depth) {

7162 if (!SelectorConst)

7164

7167

7168

7169

7170 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;

7171 const unsigned SelBits = (4 - LeadingBytes) * 4;

7172 if (Selector.getLoBits(SelBits) == APInt(32, 0x3210).getLoBits(SelBits))

7173 return Op0;

7174 if (Selector.getLoBits(SelBits) == APInt(32, 0x7654).getLoBits(SelBits))

7175 return Op1;

7176

7178

7179

7184

7187 if ((DemandedOp0 && DemandedOp0 != Op0) ||

7188 (DemandedOp1 && DemandedOp1 != Op1)) {

7189 Op0 = DemandedOp0 ? DemandedOp0 : Op0;

7190 Op1 = DemandedOp1 ? DemandedOp1 : Op1;

7192 }

7193

7195}

7196

7201

7202 switch (Op.getOpcode()) {

7203 case NVPTXISD::PRMT:

7205 *this, Depth)) {

7207 return true;

7208 }

7209 break;

7210 default:

7211 break;

7212 }

7213

7215 return false;

7216}

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

AMDGPU Register Bank Select

This file declares a class to represent arbitrary precision floating point values and provide a varie...

This file implements a class to represent arbitrary precision integral constant values and operations...

static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.

static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.

static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)

static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)

PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

Function Alias Analysis Results

Atomic ordering constants.

This file contains the simple types necessary to represent the attributes associated with functions a...

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

#define clEnumValN(ENUMVAL, FLAGNAME, DESC)

This file contains the declarations for the subclasses of Constant, which represent the different fla...

This file contains the declarations of entities that describe floating point environment and related ...

shuff Hexagon Optimize Shuffle Vector

Module.h This file contains the declarations for the Module class.

const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]

static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)

Return the first DebugLoc that has line number information, given a range of instructions.

Register const TargetRegisterInfo * TRI

NVPTX address space definition.

static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)

Definition NVPTXISelLowering.cpp:1313

static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)

Definition NVPTXISelLowering.cpp:6355

static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))

static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2551

static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)

Definition NVPTXISelLowering.cpp:6209

static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))

static bool isConstOne(const SDValue &Operand)

Definition NVPTXISelLowering.cpp:6068

static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))

static bool IsPTXVectorType(MVT VT)

Definition NVPTXISelLowering.cpp:155

static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:3623

static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2749

static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)

Definition NVPTXISelLowering.cpp:1322

static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:3069

static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)

ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...

Definition NVPTXISelLowering.cpp:302

static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)

Definition NVPTXISelLowering.cpp:6578

static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)

Definition NVPTXISelLowering.cpp:6792

static unsigned getMinMax3Opcode(unsigned MinMax2Opcode)

Get 3-input version of a 2-input min/max opcode.

Definition NVPTXISelLowering.cpp:5814

static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)

Definition NVPTXISelLowering.cpp:3694

static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)

Definition NVPTXISelLowering.cpp:3612

static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)

Definition NVPTXISelLowering.cpp:6776

static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)

Definition NVPTXISelLowering.cpp:6752

static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:3010

static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)

Definition NVPTXISelLowering.cpp:6086

static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)

Definition NVPTXISelLowering.cpp:6426

static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)

Fold packing movs into a store.

Definition NVPTXISelLowering.cpp:5661

static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)

Definition NVPTXISelLowering.cpp:6600

static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)

Definition NVPTXISelLowering.cpp:368

static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:351

static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)

Definition NVPTXISelLowering.cpp:426

static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)

Definition NVPTXISelLowering.cpp:342

static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2974

static SDValue PerformFMinMaxCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned PTXVersion, unsigned SmVersion)

PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into (fmaxnum3 a, b, c).

Definition NVPTXISelLowering.cpp:5833

static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)

Definition NVPTXISelLowering.cpp:5895

static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)

Definition NVPTXISelLowering.cpp:5456

static std::optional< unsigned > getScalar3OpcodeForReduction(unsigned ReductionOpcode)

Get 3-input scalar reduction opcode.

Definition NVPTXISelLowering.cpp:1950

static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2953

static bool isConstZero(const SDValue &Operand)

Definition NVPTXISelLowering.cpp:5406

static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2531

static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2675

static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)

IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...

Definition NVPTXISelLowering.cpp:5939

static unsigned getTcgen05MMADisableOutputLane(unsigned IID)

Definition NVPTXISelLowering.cpp:2615

static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)

Definition NVPTXISelLowering.cpp:7119

static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)

Definition NVPTXISelLowering.cpp:6407

static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)

Definition NVPTXISelLowering.cpp:1933

static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)

Definition NVPTXISelLowering.cpp:5863

static SDValue lowerBSWAP(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2577

static SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:3137

static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)

Definition NVPTXISelLowering.cpp:6127

static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)

Definition NVPTXISelLowering.cpp:7051

static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)

Fold unpacking movs into a load by increasing the number of return values.

Definition NVPTXISelLowering.cpp:5547

static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2829

static std::optional< std::pair< SDValue, SDValue > > lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset=false)

Definition NVPTXISelLowering.cpp:2706

static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2869

static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)

replaceLoadVector - Convert vector loads into multi-output scalar loads.

Definition NVPTXISelLowering.cpp:3503

static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:3020

static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)

AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...

Definition NVPTXISelLowering.cpp:5966

static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)

TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...

Definition NVPTXISelLowering.cpp:6002

static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2924

static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)

Definition NVPTXISelLowering.cpp:6097

static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)

Reduces the elements using the scalar operations provided.

Definition NVPTXISelLowering.cpp:1886

static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)

Definition NVPTXISelLowering.cpp:6508

static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)

Definition NVPTXISelLowering.cpp:480

static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)

Definition NVPTXISelLowering.cpp:1866

static std::pair< MemSDNode *, uint32_t > convertMLOADToLoadWithUsedBytesMask(MemSDNode *N, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:3456

static SDValue matchMADConstOnePattern(SDValue Add)

Definition NVPTXISelLowering.cpp:6073

static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)

Definition NVPTXISelLowering.cpp:1354

static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)

Definition NVPTXISelLowering.cpp:1346

static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))

static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)

Definition NVPTXISelLowering.cpp:7082

static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)

Definition NVPTXISelLowering.cpp:6374

static EVT promoteScalarIntegerPTX(const EVT VT)

PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...

Definition NVPTXISelLowering.cpp:392

static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)

Definition NVPTXISelLowering.cpp:7153

static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:3075

static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:7145

static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)

Definition NVPTXISelLowering.cpp:6452

static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:3064

static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:2415

OperandSignedness

Definition NVPTXISelLowering.cpp:5930

@ Unknown

Definition NVPTXISelLowering.cpp:5933

@ Unsigned

Definition NVPTXISelLowering.cpp:5932

@ Signed

Definition NVPTXISelLowering.cpp:5931

static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)

Definition NVPTXISelLowering.cpp:6182

static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)

Definition NVPTXISelLowering.cpp:200

static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))

static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))

Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....

static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)

Definition NVPTXISelLowering.cpp:3103

static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)

Definition NVPTXISelLowering.cpp:5756

static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)

Definition NVPTXISelLowering.cpp:5741

static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)

PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.

Definition NVPTXISelLowering.cpp:6170

MachineInstr unsigned OpIdx

const SmallVectorImpl< MachineOperand > & Cond

static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))

This file defines the SmallVector class.

static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")

static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")

This file describes how to lower LLVM code to machine code.

static const fltSemantics & IEEEsingle()

static APFloat getInf(const fltSemantics &Sem, bool Negative=false)

Factory for Positive and Negative Infinity.

Class for arbitrary precision integers.

LLVM_ABI APInt getLoBits(unsigned numBits) const

Compute an APInt containing numBits lowbits from this APInt.

uint64_t getZExtValue() const

Get zero extended value.

void setHighBits(unsigned hiBits)

Set the top hiBits bits.

LLVM_ABI APInt getHiBits(unsigned numBits) const

Compute an APInt containing numBits highbits from this APInt.

LLVM_ABI APInt trunc(unsigned width) const

Truncate to new width.

void setBit(unsigned BitPosition)

Set the given bit to 1 whose position is given as "bitPosition".

unsigned getBitWidth() const

Return the number of bits in the APInt.

bool isSignedIntN(unsigned N) const

Check if this APInt has an N-bits signed integer value.

bool slt(const APInt &RHS) const

Signed less than comparison.

LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const

Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).

bool isIntN(unsigned N) const

Check if this APInt has an N-bits unsigned integer value.

bool sge(const APInt &RHS) const

Signed greater or equal comparison.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

const T & back() const

back - Get the last element.

ArrayRef< T > drop_back(size_t N=1) const

Drop the last N elements of the array.

bool empty() const

empty - Check if the array is empty.

ArrayRef< T > slice(size_t N, size_t M) const

slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.

an instruction that atomically reads a memory location, combines it with another value,...

@ Min

*p = old <signed v ? old : v

@ UIncWrap

Increment one up to a maximum value.

@ Max

*p = old >signed v ? old : v

@ UMin

*p = old <unsigned v ? old : v

@ UMax

*p = old >unsigned v ? old : v

@ UDecWrap

Decrement one until a minimum value or zero.

bool isFloatingPointOperation() const

BinOp getOperation() const

This is an SDNode representing atomic operations.

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signa...

FunctionType * getFunctionType() const

const APInt & getAPIntValue() const

static LLVM_ABI Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

uint64_t getNumOperands() const

A parsed version of the target data layout string in and methods for querying it.

LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const

Returns the offset in bytes between successive objects of the specified type, including alignment pad...

LLVM_ABI Align getPrefTypeAlign(Type *Ty) const

Returns the preferred stack/global alignment for the specified type.

Diagnostic information for unsupported feature in backend.

void addFnAttr(Attribute::AttrKind Kind)

Add function attributes to this function.

Common base class shared among various IRBuilders.

This is an important class for using LLVM in a threaded context.

LLVM_ABI void diagnose(const DiagnosticInfo &DI)

Report a message to the currently installed diagnostic handler.

This class is used to represent ISD::LOAD nodes.

MCSection * getDataSection() const

static constexpr unsigned NoRegister

Instances of this class represent a uniqued identifier for a section in the current translation unit.

StringRef getName() const

getName - Get the symbol name.

static auto integer_fixedlen_vector_valuetypes()

unsigned getVectorNumElements() const

bool isVector() const

Return true if this is a vector value type.

bool isScalableVector() const

Return true if this is a vector value type where the runtime length is machine dependent.

static auto integer_valuetypes()

TypeSize getSizeInBits() const

Returns the size of the specified MVT in bits.

static auto fixedlen_vector_valuetypes()

TypeSize getStoreSize() const

Return the number of bytes overwritten by a store of the specified value type.

static MVT getVectorVT(MVT VT, unsigned NumElements)

MVT getVectorElementType() const

static MVT getIntegerVT(unsigned BitWidth)

static auto fp_valuetypes()

MVT getScalarType() const

If this is a vector, return the element type, otherwise return this.

static auto fp_fixedlen_vector_valuetypes()

DenormalMode getDenormalMode(const fltSemantics &FPType) const

Returns the denormal handling type for the default rounding mode of the function.

Function & getFunction()

Return the LLVM function that this machine code represents.

const MachineJumpTableInfo * getJumpTableInfo() const

getJumpTableInfo - Return the jump table info object for the current function.

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with

@ EK_Inline

EK_Inline - Jump table entries are emitted inline at their point of use.

const std::vector< MachineJumpTableEntry > & getJumpTables() const

@ MODereferenceable

The memory access is dereferenceable (i.e., doesn't trap).

@ MOLoad

The memory access reads data.

@ MOInvariant

The memory access always returns the same value (or traps).

@ MOStore

The memory access writes data.

This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.

This is an abstract virtual class for memory operations.

MachineMemOperand * getMemOperand() const

Return a MachineMemOperand object describing the memory reference performed by operation.

EVT getMemoryVT() const

Return the type of the in-memory value.

static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)

bool hasAtomSwap128() const

bool hasF32x2Instructions() const

bool has256BitVectorLoadStore(unsigned AS) const

AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override

Definition NVPTXISelLowering.cpp:6967

ConstraintType getConstraintType(StringRef Constraint) const override

getConstraintType - Given a constraint letter, return the type of constraint it is for this target.

Definition NVPTXISelLowering.cpp:5335

SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override

This callback is invoked for operations that are unsupported by the target, which are registered to u...

Definition NVPTXISelLowering.cpp:3218

const NVPTXTargetMachine * nvTM

bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override

Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...

Definition NVPTXISelLowering.cpp:4080

bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override

Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.

Definition NVPTXISelLowering.cpp:7197

NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)

Definition NVPTXISelLowering.cpp:515

std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const

Definition NVPTXISelLowering.cpp:1172

unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override

Definition NVPTXISelLowering.cpp:7020

bool useF32FTZ(const MachineFunction &MF) const

Definition NVPTXISelLowering.cpp:150

SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const

Definition NVPTXISelLowering.cpp:1820

Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const

Definition NVPTXISelLowering.cpp:1276

SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override

Hooks for building estimates in place of slower divisions and square roots.

Definition NVPTXISelLowering.cpp:1123

SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override

This hook must be implemented to lower outgoing return values, described by the Outs array,...

Definition NVPTXISelLowering.cpp:4003

SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override

This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...

Definition NVPTXISelLowering.cpp:3883

void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override

Lower the specified operand into the Ops vector.

Definition NVPTXISelLowering.cpp:4067

SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const

Definition NVPTXISelLowering.cpp:1798

Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override

Definition NVPTXISelLowering.cpp:6996

std::string getParamName(const Function *F, int Idx) const

Definition NVPTXISelLowering.cpp:5275

TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override

Return the preferred vector type legalization action.

Definition NVPTXISelLowering.cpp:1116

NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const

Definition NVPTXISelLowering.cpp:123

bool shouldInsertFencesForAtomic(const Instruction *) const override

Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.

Definition NVPTXISelLowering.cpp:6950

Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const

getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...

Definition NVPTXISelLowering.cpp:5228

SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const

Definition NVPTXISelLowering.cpp:1758

EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override

Return the ValueType of the result of SETCC operations.

std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override

Given a physical register constraint (e.g.

Definition NVPTXISelLowering.cpp:5357

bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override

isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...

Definition NVPTXISelLowering.cpp:5294

Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override

Inserts in the IR a target-specific intrinsic specifying a fence.

Definition NVPTXISelLowering.cpp:6978

AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override

Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.

Definition NVPTXISelLowering.cpp:6864

Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const

Helper for computing alignment of a device function byval parameter.

Definition NVPTXISelLowering.cpp:5249

bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const

Definition NVPTXISelLowering.cpp:5389

bool usePrecSqrtF32(const SDNode *N=nullptr) const

Definition NVPTXISelLowering.cpp:136

unsigned getJumpTableEncoding() const override

Return the entry encoding for a jump table in the current function.

Definition NVPTXISelLowering.cpp:3364

SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override

This hook must be implemented to lower calls into the specified DAG.

Definition NVPTXISelLowering.cpp:1369

void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override

Determine which of the bits specified in Mask are known to be either zero or one and return them in t...

Definition NVPTXISelLowering.cpp:7100

MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override

Definition NVPTXISelLowering.cpp:7046

~NVPTXTargetObjectFile() override

static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)

This constructs a pointer to an object of the specified type in a numbered address space.

Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...

const DebugLoc & getDebugLoc() const

Represents one node in the SelectionDAG.

ArrayRef< SDUse > ops() const

const APInt & getAsAPIntVal() const

Helper method returns the APInt value of a ConstantSDNode.

unsigned getOpcode() const

Return the SelectionDAG opcode value for this node.

bool hasOneUse() const

Return true if there is exactly one use of this node.

unsigned getIROrder() const

Return the node ordering.

SDNodeFlags getFlags() const

uint64_t getAsZExtVal() const

Helper method returns the zero-extended integer value of a ConstantSDNode.

unsigned getNumValues() const

Return the number of values defined/returned by this operator.

SDVTList getVTList() const

const SDValue & getOperand(unsigned Num) const

bool isUndef() const

Returns true if the node type is UNDEF or POISON.

iterator_range< user_iterator > users()

void setFlags(SDNodeFlags NewFlags)

Represents a use of a SDNode.

Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.

SDNode * getNode() const

get the SDNode which holds the desired result

bool hasOneUse() const

Return true if there is exactly one node using value ResNo of Node.

SDValue getValue(unsigned R) const

EVT getValueType() const

Return the ValueType of the referenced return value.

const SDValue & getOperand(unsigned i) const

uint64_t getScalarValueSizeInBits() const

uint64_t getConstantOperandVal(unsigned i) const

unsigned getOpcode() const

SectionKind - This is a simple POD value that classifies the properties of a section.

This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...

LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

const SDValue & getRoot() const

Return the root tag of the SelectionDAG.

LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)

Return an AddrSpaceCastSDNode.

const TargetSubtargetInfo & getSubtarget() const

LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)

Create a MERGE_VALUES node from the given operands.

LLVM_ABI SDVTList getVTList(EVT VT)

Return an SDVTList that represents the list of values specified.

LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())

Append the extracted elements from Start to Count out of the vector Op in Args.

LLVM_ABI SDValue getFreeze(SDValue V)

Return a freeze using the SDLoc of the value operand.

SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)

Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...

LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)

Return a GlobalAddress of the function from the current module with name matching the given ExternalS...

LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)

Create a ConstantFPSDNode wrapping a constant value.

LLVM_ABI SDValue getRegister(Register Reg, EVT VT)

LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)

Loads are not normal binary operators: their result type is not determined by their operands,...

LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())

Creates a MemIntrinsicNode that may produce a result and takes a list of operands.

LLVM_ABI Align getEVTAlign(EVT MemoryVT) const

Compute the default alignment value for the given type.

LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)

Create a bitwise NOT operation as (XOR Val, -1).

const TargetLowering & getTargetLoweringInfo() const

LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)

This mutates the specified node to have the specified return type, opcode, and operands.

SDValue getUNDEF(EVT VT)

Return an UNDEF node. UNDEF does not have a useful SDLoc.

SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)

Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).

SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)

Return an ISD::BUILD_VECTOR node.

LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)

Return a bitcast using the SDLoc of the value operand, and casting to the provided type.

SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())

Helper function to make it easier to build Select's if you just have operands and don't want to check...

const DataLayout & getDataLayout() const

LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)

Creates a new TokenFactor containing Vals.

LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

Create a ConstantSDNode wrapping a constant value.

LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

Helper function to build ISD::STORE nodes.

LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)

Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...

LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)

SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())

Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...

LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)

LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)

Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...

LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)

LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)

Gets or creates the specified node.

LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)

Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...

SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)

LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)

MachineFunction & getMachineFunction() const

LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const

Determine which bits of Op are known to be either zero or one and return them in Known.

LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)

Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...

SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)

Create an add instruction with appropriate flags when used for addressing some offset of an object.

LLVMContext * getContext() const

const SDValue & setRoot(SDValue N)

Set the current root tag of the SelectionDAG.

LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)

ArrayRef< int > getMask() const

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void append(ItTy in_start, ItTy in_end)

Add the specified range to the end of the SmallVector.

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

This class is used to represent ISD::STORE nodes.

StringRef - Represent a constant reference to a string, i.e.

constexpr size_t size() const

size - Get the string size.

constexpr const char * data() const

data - Get a pointer to the start of the string (which may not be null terminated).

Align getStackAlign() const

getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...

void setBooleanVectorContents(BooleanContent Ty)

Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...

void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)

Indicate that the specified operation does not work with the specified type and indicate what to do a...

void setMaxDivRemBitWidthSupported(unsigned SizeInBits)

Set the size in bits of the maximum div/rem the backend supports.

EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const

Return the EVT corresponding to this LLVM type.

unsigned MaxStoresPerMemcpyOptSize

Likewise for functions with the OptSize attribute.

const TargetMachine & getTargetMachine() const

virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const

Certain targets require unusual breakdowns of certain types.

virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const

Certain combinations of ABIs, Targets and features require that types are legal for some operations a...

void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)

Convenience method to set an operation to Promote and specify the type in a single call.

LegalizeTypeAction

This enum indicates whether a types are legal for a target, and if not, what action should be used to...

void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)

Tells the code generator which bitwidths to bypass.

virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const

Return the number of registers that this ValueType will eventually require.

void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)

Set the maximum atomic operation size supported by the backend.

virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const

Return the preferred vector type legalization action.

unsigned MaxStoresPerMemsetOptSize

Likewise for functions with the OptSize attribute.

void setBooleanContents(BooleanContent Ty)

Specify how the target extends the result of integer and floating point boolean values from i1 to a w...

unsigned MaxStoresPerMemmove

Specify maximum number of store instructions per memmove call.

void computeRegisterProperties(const TargetRegisterInfo *TRI)

Once all of the register classes are added, this allows us to compute derived properties we expose.

unsigned MaxStoresPerMemmoveOptSize

Likewise for functions with the OptSize attribute.

void addRegisterClass(MVT VT, const TargetRegisterClass *RC)

Add the specified register class as an available regclass for the specified value type.

bool isTypeLegal(EVT VT) const

Return true if the target has native support for the specified value type.

virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const

Return the pointer type for the given address space, defaults to the pointer type from the data layou...

bool isOperationLegal(unsigned Op, EVT VT) const

Return true if the specified operation is legal on this target.

unsigned MaxStoresPerMemset

Specify maximum number of store instructions per memset call.

void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified truncating store does not work with the specified type and indicate what ...

@ ZeroOrNegativeOneBooleanContent

void setMinCmpXchgSizeInBits(unsigned SizeInBits)

Sets the minimum cmpxchg or ll/sc size supported by the backend.

void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)

If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...

AtomicExpansionKind

Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.

void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)

Indicate that the specified condition code is or isn't supported on the target and indicate what to d...

void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)

Targets should invoke this method for each target independent node that they want to provide a custom...

Align getMinStackArgumentAlignment() const

Return the minimum stack alignment of an argument.

void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified load with extension does not work with the specified type and indicate wh...

std::vector< ArgListEntry > ArgListTy

virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const

virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const

Inserts in the IR a target-specific intrinsic specifying a fence.

unsigned MaxStoresPerMemcpy

Specify maximum number of store instructions per memcpy call.

void setSchedulingPreference(Sched::Preference Pref)

Specify the target scheduling preference.

MVT getRegisterType(MVT VT) const

Return the type of registers that this ValueType will eventually require.

void setJumpIsExpensive(bool isExpensive=true)

Tells the code generator not to expand logic operations on comparison predicates into separate sequen...

LegalizeAction getOperationAction(unsigned Op, EVT VT) const

Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...

This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...

SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const

More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...

virtual ConstraintType getConstraintType(StringRef Constraint) const

Given a constraint, return the type of constraint it is for this target.

virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const

Given a physical register constraint (e.g.

TargetLowering(const TargetLowering &)=delete

SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const

Truncate Op to ResultVT.

SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const

Expand round(fp) to fp conversion.

virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const

Lower the specified operand into the Ops vector.

Primary interface to the complete machine description for the target machine.

CodeGenOptLevel getOptLevel() const

Returns the optimization level: None, Less, Default, or Aggressive.

MCSymbol * getSymbol(const GlobalValue *GV) const

FPOpFusion::FPOpFusionMode AllowFPOpFusion

AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

virtual const TargetFrameLowering * getFrameLowering() const

static constexpr TypeSize getFixed(ScalarTy ExactSize)

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY

Return the basic size of this type if it is a primitive type.

bool isFloatingPointTy() const

Return true if this is one of the floating-point types.

bool isIntegerTy() const

True if this is an instance of IntegerType.

bool isVoidTy() const

Return true if this is 'void'.

Type * getType() const

All values are typed, get the type of this value.

A raw_ostream that writes to an std::string.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

LLVM_ABI APInt pow(const APInt &X, int64_t N)

Compute X^N for N>=0.

unsigned ID

LLVM IR allows to use arbitrary numbers as calling convention identifiers.

@ C

The default llvm calling convention, compatible with C.

NodeType

ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.

@ SETCC

SetCC operator - This evaluates to a true value iff the condition is true.

@ POISON

POISON - A poison node.

@ SMUL_LOHI

SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...

@ BSWAP

Byte Swap and Counting operators.

@ ADDC

Carry-setting nodes for multiple precision addition and subtraction.

@ ADD

Simple integer binary arithmetic operators.

@ ANY_EXTEND

ANY_EXTEND - Used for integer types. The high bits are undefined.

@ FMA

FMA - Perform a * b + c with no intermediate rounding step.

@ INTRINSIC_VOID

OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...

@ SINT_TO_FP

[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...

@ CONCAT_VECTORS

CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...

@ FADD

Simple binary floating point operators.

@ ABS

ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.

@ SDIVREM

SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.

@ BUILD_PAIR

BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.

@ SIGN_EXTEND

Conversion operators.

@ SSUBO

Same for subtraction.

@ SSUBSAT

RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...

@ SELECT

Select(COND, TRUEVAL, FALSEVAL).

@ UNDEF

UNDEF - An undefined node.

@ EXTRACT_ELEMENT

EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...

@ CopyFromReg

CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...

@ SADDO

RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.

@ MULHU

MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...

@ SHL

Shift and rotation operations.

@ VECTOR_SHUFFLE

VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.

@ EXTRACT_SUBVECTOR

EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.

@ EXTRACT_VECTOR_ELT

EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...

@ CopyToReg

CopyToReg - This node has three operands: a chain, a register number to set to this value,...

@ ZERO_EXTEND

ZERO_EXTEND - Used for integer types, zeroing the new bits.

@ SELECT_CC

Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...

@ SSHLSAT

RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.

@ SMULO

Same for multiplication.

@ SIGN_EXTEND_INREG

SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...

@ SMIN

[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.

@ VSELECT

Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...

@ UADDO_CARRY

Carry-using nodes for multiple precision addition and subtraction.

@ FRAMEADDR

FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.

@ STRICT_FP_TO_SINT

STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.

@ FP_TO_SINT

FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.

@ AND

Bitwise operators - logical and, logical or, logical xor.

@ INTRINSIC_WO_CHAIN

RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...

@ ADDE

Carry-using nodes for multiple precision addition and subtraction.

@ FREEZE

FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...

@ INSERT_VECTOR_ELT

INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.

@ FP_ROUND

X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...

@ TRUNCATE

TRUNCATE - Completely drop the high bits.

@ SHL_PARTS

SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.

@ FCOPYSIGN

FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.

@ SADDSAT

RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...

@ SADDO_CARRY

Carry-using overflow-aware nodes for multiple precision addition and subtraction.

@ INTRINSIC_W_CHAIN

RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...

@ BUILD_VECTOR

BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...

LLVM_ABI bool allOperandsUndef(const SDNode *N)

Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...

This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.

@ Bitcast

Perform the operation on a different, but equivalently sized type.

@ ADDRESS_SPACE_SHARED_CLUSTER

@ ATOMIC_CMP_SWAP_B128

These nodes are used to lower atomic instructions with i128 type.

bool isPackedVectorTy(EVT VT)

ValuesClass values(OptsTy... Options)

Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...

initializer< Ty > init(const Ty &Val)

NodeAddr< NodeBase * > Node

This is an optimization pass for GlobalISel generic memory operations.

@ Low

Lower the current thread's priority such that it does not affect foreground tasks significantly.

detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)

zip iterator for two or more iteratable types.

FunctionAddr VTableAddr Value

bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

MaybeAlign getAlign(const CallInst &I, unsigned Index)

auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)

Get the size of a range.

void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())

ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

uint64_t PowerOf2Ceil(uint64_t A)

Returns the power of two which is greater than or equal to the given value.

bool isReleaseOrStronger(AtomicOrdering AO)

OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)

Wrapper function around std::transform to apply a function to a range and store the result elsewhere.

auto reverse(ContainerTy &&C)

unsigned promoteScalarArgumentSize(unsigned size)

LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)

bool shouldPassAsArray(Type *Ty)

CodeGenOptLevel

Code generation optimization level.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

AtomicOrdering

Atomic ordering for LLVM's memory model.

@ Sub

Subtraction of integers.

uint64_t alignTo(uint64_t Size, Align A)

Returns a multiple of A needed to store Size bytes.

DWARFExpression::Operation Op

ArrayRef(const T &OneElt) -> ArrayRef< T >

bool isAcquireOrStronger(AtomicOrdering AO)

constexpr unsigned BitWidth

bool isKernelFunction(const Function &F)

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

Function * getMaybeBitcastedCallee(const CallBase *CB)

Align commonAlignment(Align A, uint64_t Offset)

Returns the alignment that satisfies both alignments.

auto seq(T Begin, T End)

Iterate over an integral type from Begin up to - but not including - End.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This struct is a compact representation of a valid (non-zero power of two) alignment.

constexpr uint64_t value() const

This is a hole in the type system and should not be abused.

@ PreserveSign

The sign of a flushed-to-zero number is preserved in the sign of 0.

DenormalModeKind Output

Denormal flushing mode for floating point instruction results in the default floating point environme...

TypeSize getStoreSize() const

Return the number of bytes overwritten by a store of the specified value type.

bool isSimple() const

Test if the given EVT is simple (as opposed to being extended).

static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)

Returns the EVT that represents a vector NumElements in length, where each element is of type VT.

EVT changeTypeToInteger() const

Return the type converted to an equivalently sized integer or vector with integer element type.

bool bitsGT(EVT VT) const

Return true if this has more bits than VT.

bool bitsLT(EVT VT) const

Return true if this has less bits than VT.

ElementCount getVectorElementCount() const

bool is32BitVector() const

Return true if this is a 32-bit vector type.

TypeSize getSizeInBits() const

Return the size of the specified value type in bits.

uint64_t getScalarSizeInBits() const

MVT getSimpleVT() const

Return the SimpleValueType held in the specified simple EVT.

uint64_t getFixedSizeInBits() const

Return the size of the specified fixed width value type in bits.

bool isVector() const

Return true if this is a vector value type.

EVT getScalarType() const

If this is a vector type, return the element type, otherwise return this.

bool bitsEq(EVT VT) const

Return true if this has the same number of bits as VT.

LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const

This method returns an LLVM type corresponding to the specified EVT.

EVT getVectorElementType() const

Given a vector type, return the type of each element.

bool isScalarInteger() const

Return true if this is an integer, but not a vector.

EVT changeVectorElementType(EVT EltVT) const

Return a VT for a vector type whose attributes match ourselves with the exception of the element type...

unsigned getVectorNumElements() const

Given a vector type, return the number of elements it contains.

bool isInteger() const

Return true if this is an integer or a vector integer type.

static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)

Compute known bits for ashr(LHS, RHS).

KnownBits concat(const KnownBits &Lo) const

Concatenate the bits from Lo onto the bottom of *this.

unsigned getBitWidth() const

Get the bit width of this value.

void resetAll()

Resets the known state of all bits.

void insertBits(const KnownBits &SubBits, unsigned BitPosition)

Insert the bits from a smaller known bits starting at bitPosition.

This class contains a discriminated union of information about pointers in memory operands,...

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

These are IR-level optimization flags that may be propagated to SDNodes.

bool hasAllowContract() const

This represents a list of ValueType's that has been intern'd by a SelectionDAG.

This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...

This structure contains all information that is necessary for lowering calls.

SmallVector< ISD::InputArg, 32 > Ins

SmallVector< ISD::OutputArg, 32 > Outs

SmallVector< SDValue, 32 > OutVals

Type * RetTy

Same as OrigRetTy, or partially legalized for soft float libcalls.

bool isAfterLegalizeDAG() const

bool isBeforeLegalize() const

A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...

bool CombineTo(SDValue O, SDValue N)