LLVM: lib/Target/NVPTX/NVPTXISelLowering.cpp Source File
#include "llvm/IR/IntrinsicsNVPTX.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "nvptx-lower"

using namespace llvm;

82 "nvptx-sched4reg",
83 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
84
87 cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
88 " 1: do it 2: do it aggressively"),
90
94 "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),
99 "Use IEEE Compliant F32 div.rnd if available (default)"),
101 "Use IEEE Compliant F32 div.rnd if available, no FTZ")),
103
106 cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
108
109
110
112 "nvptx-approx-log2f32",
113 cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),
115
117 "nvptx-force-min-byval-param-align", cl::Hidden,
118 cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval"
119 " params of device functions."),
121
125
128
130 if (Flags.hasApproximateFuncs())
132
134}
135
137
140
141 if (N) {
143 if (Flags.hasApproximateFuncs())
144 return false;
145 }
146
147 return true;
148}
149
154
157 default:
158 return false;
159 case MVT::v2i1:
160 case MVT::v4i1:
161 case MVT::v2i8:
162 case MVT::v4i8:
163 case MVT::v8i8:
164 case MVT::v16i8:
165 case MVT::v2i16:
166 case MVT::v4i16:
167 case MVT::v8i16:
168 case MVT::v2i32:
169 case MVT::v4i32:
170 case MVT::v2i64:
171 case MVT::v2f16:
172 case MVT::v4f16:
173 case MVT::v8f16:
174 case MVT::v2bf16:
175 case MVT::v4bf16:
176 case MVT::v8bf16:
177 case MVT::v2f32:
178 case MVT::v4f32:
179 case MVT::v2f64:
180 case MVT::v4i64:
181 case MVT::v4f64:
182 case MVT::v8i32:
183 case MVT::v8f32:
184 case MVT::v16f16:
185 case MVT::v16bf16:
186 case MVT::v16i16:
187 case MVT::v32i8:
188 return true;
189 }
190}
191
192
193
194
195
196
197
198
199static std::optional<std::pair<unsigned int, MVT>>
203
206 return {{4, MVT::i64}};
207
209 return std::nullopt;
211
213 if (VectorVT == MVT::i128 || VectorVT == MVT::f128)
214 return {{2, MVT::i64}};
215 return std::nullopt;
216 }
217
220
221
222 unsigned PackRegSize;
223
224
225
226
228 default:
229 return std::nullopt;
230
231 case MVT::v4i64:
232 case MVT::v4f64:
233
234
235 if (!CanLowerTo256Bit)
236 return std::nullopt;
237 [[fallthrough]];
238 case MVT::v2i8:
239 case MVT::v2i64:
240 case MVT::v2f64:
241
242 return std::pair(NumElts, EltVT);
243
244 case MVT::v16f16:
245 case MVT::v16bf16:
246 case MVT::v16i16:
247 case MVT::v32i8:
248
249
250 if (!CanLowerTo256Bit)
251 return std::nullopt;
252 [[fallthrough]];
253 case MVT::v2i16:
254 case MVT::v2f16:
255 case MVT::v2bf16:
256 case MVT::v4i8:
257 case MVT::v4i16:
258 case MVT::v4f16:
259 case MVT::v4bf16:
260 case MVT::v8i8:
261 case MVT::v8f16:
262 case MVT::v8bf16:
263 case MVT::v8i16:
264 case MVT::v16i8:
265 PackRegSize = 32;
266 break;
267
268 case MVT::v8f32:
269 case MVT::v8i32:
270
271
272 if (!CanLowerTo256Bit)
273 return std::nullopt;
274 [[fallthrough]];
275 case MVT::v2f32:
276 case MVT::v4f32:
277 case MVT::v2i32:
278 case MVT::v4i32:
280 return std::pair(NumElts, EltVT);
281 PackRegSize = 64;
282 break;
283 }
284
285
286
287
288
289
290 const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();
291
292 return std::pair(NumElts / NPerReg, MVT::getVectorVT(EltVT, NPerReg));
293}
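// Illustrative sketch (not part of this file): the packing arithmetic the
// routine above performs once a pack-register width has been chosen. For
// example, v8f16 with a 32-bit pack register yields 4 registers of 2 lanes
// each. The struct and helper names here are hypothetical.
struct PackShape {
  unsigned NumRegs;     // number of packed registers emitted
  unsigned LanesPerReg; // vector lanes folded into each register
};
static PackShape packShape(unsigned NumElts, unsigned EltBits,
                           unsigned PackRegBits) {
  const unsigned LanesPerReg = PackRegBits / EltBits;
  return {NumElts / LanesPerReg, LanesPerReg};
}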
294
295
296
297
298
299
300
301
306 uint64_t StartingOffset = 0) {
309 ComputeValueVTs(TLI, DL, Ty, TempVTs, nullptr, &TempOffsets,
310 StartingOffset);
311
312 for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {
315
316
317
318 if (VT.getScalarType() == MVT::i8) {
319 if (RegisterVT == MVT::i16)
320 RegisterVT = MVT::i8;
321 else if (RegisterVT == MVT::v2i16)
322 RegisterVT = MVT::v2i8;
323 else
324 assert(RegisterVT == MVT::v4i8 &&
325 "Expected v4i8, v2i16, or i16 for i8 RegisterVT");
326 }
327
328
329
330
331
332 for (unsigned I : seq(NumRegs)) {
334 Offsets.push_back(Off + I * RegisterVT.getStoreSize());
335 }
336 }
337}
338
339
340
341
343 if (N == 1)
344 return VT;
345
349}
350
353 if (V.getValueType() == VT) {
354 assert(I == 0 && "Index must be 0 for scalar value");
355 return V;
356 }
357
361
365}
366
367template
370 if (N == 1)
371 return GetElement(0);
372
378 else
380 }
381
383 Values.size());
385}
386
387
388
389
390
391
395 default:
397 "Promotion is not suitable for scalars of size larger than 64-bits");
398 case 1:
399 return MVT::i1;
400 case 2:
401 case 4:
402 case 8:
403 return MVT::i8;
404 case 16:
405 return MVT::i16;
406 case 32:
407 return MVT::i32;
408 case 64:
409 return MVT::i64;
410 }
411 }
412 return VT;
413}
414
415
416
417
418
419
420
421
422
423
424
425template
429
430
431 if (ParamAlignment < AccessSize)
432 return 1;
433
434 if (Offsets[Idx] & (AccessSize - 1))
435 return 1;
436
437 EVT EltVT = ValueVTs[Idx];
439
440
441 if (EltSize >= AccessSize)
442 return 1;
443
444 unsigned NumElts = AccessSize / EltSize;
445
446 if (AccessSize != EltSize * NumElts)
447 return 1;
448
449
450 if (Idx + NumElts > ValueVTs.size())
451 return 1;
452
453
454 if (NumElts != 4 && NumElts != 2)
455 return 1;
456
457 for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
458
459 if (ValueVTs[j] != EltVT)
460 return 1;
461
462
463 if (Offsets[j] - Offsets[j - 1] != EltSize)
464 return 1;
465 }
466
467 return NumElts;
468}
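// Sketch of the same legality test on plain arrays, to make the checks above
// easier to follow; the helper name and parameters are illustrative, not part
// of this file. It returns how many consecutive elements starting at Idx can
// be merged into one access of AccessSize bytes (1 means "do not merge").
static unsigned mergeableElts(unsigned Idx, unsigned AccessSize,
                              const unsigned *EltSizeBytes,
                              const unsigned *OffsetBytes, unsigned Count,
                              unsigned ParamAlign) {
  if (ParamAlign < AccessSize || OffsetBytes[Idx] % AccessSize != 0)
    return 1; // parameter or element offset not aligned for the wide access
  const unsigned EltSize = EltSizeBytes[Idx];
  if (EltSize >= AccessSize || AccessSize % EltSize != 0)
    return 1; // element does not evenly subdivide the access
  const unsigned NumElts = AccessSize / EltSize;
  if (NumElts != 2 && NumElts != 4)
    return 1; // PTX vector accesses are v2/v4 only
  if (Idx + NumElts > Count)
    return 1; // not enough elements left
  for (unsigned J = Idx + 1; J < Idx + NumElts; ++J)
    if (EltSizeBytes[J] != EltSize ||
        OffsetBytes[J] - OffsetBytes[J - 1] != EltSize)
      return 1; // mixed element types or a gap between elements
  return NumElts;
}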
469
470
471
472
473
474
475
476
477
478template
482 bool IsVAArg = false) {
483
484
485
486 if (IsVAArg)
488
490
491 const auto GetNumElts = [&](unsigned I) -> unsigned {
492 for (const unsigned AccessSize : {16, 8, 4, 2}) {
494 I, AccessSize, ValueVTs, Offsets, ParamAlignment);
495 assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&
496 "Unexpected vectorization size");
497 if (NumElts != 1)
498 return NumElts;
499 }
500 return 1;
501 };
502
503
504 for (unsigned I = 0, E = ValueVTs.size(); I != E;) {
505 const unsigned NumElts = GetNumElts(I);
506 VectorInfo.push_back(NumElts);
507 I += NumElts;
508 }
509 assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==
510 ValueVTs.size());
511 return VectorInfo;
512}
513
514
517 : TargetLowering(TM, STI), nvTM(&TM), STI(STI), GlobalUniqueCallSite(0) {
518
519
520
524
527
528
529
531
532
533
535
536
539 else
541
542 auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
544 bool IsOpSupported = STI.allowFP16Math();
545 switch (Op) {
546
547 case ISD::FMINNUM:
548 case ISD::FMAXNUM:
549 case ISD::FMAXNUM_IEEE:
550 case ISD::FMINNUM_IEEE:
551 case ISD::FMAXIMUM:
552 case ISD::FMINIMUM:
553 case ISD::FMAXIMUMNUM:
554 case ISD::FMINIMUMNUM:
555 IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
556 break;
557 case ISD::FEXP2:
558 IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70;
559 break;
560 }
562 };
563
564 auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
566 bool IsOpSupported = STI.hasNativeBF16Support(Op);
568 Op, VT, IsOpSupported ? Action : NoBF16Action);
569 };
570
571 auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
573 bool IsOpSupported = false;
574
575 switch (Op) {
581 IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
582 break;
583 }
585 };
586
599
600 if (STI.hasF32x2Instructions()) {
603 }
604
605
610
612 if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)
614
617
618
623
628
629
634
639
640
643
647
648
649 if (STI.hasF32x2Instructions())
652
653
655
656
657
674 {MVT::v4i8, MVT::v2i32}, Expand);
675
676
677 for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
678 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
679 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {
682 }
683
684
686
687
688
695
702
705
707 {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
709
710 if (STI.hasHWROT32()) {
714 }
715
718
719
720
722
723
724 for (auto FloatVTs :
726 for (MVT ValVT : FloatVTs) {
727 for (MVT MemVT : FloatVTs) {
730 }
731 }
732 }
733
734
735
736
737 for (auto IntVTs :
739 for (MVT ValVT : IntVTs)
740 for (MVT MemVT : IntVTs)
743
744
750 }
751
752
753
754
755
756
757
761 {MVT::v2i8, MVT::v2i16}, Expand);
765
766
767
768
769 setOperationAction({ISD::LOAD, ISD::STORE}, {MVT::i128, MVT::i256, MVT::f128},
772 if ((VT) && VT.getStoreSizeInBits() <= 256)
773 setOperationAction({ISD::STORE, ISD::LOAD, ISD::MSTORE, ISD::MLOAD}, VT,
775
776
777
778
783
788
789
794
797
798
800
802
803
808
810 {MVT::i16, MVT::i32, MVT::i64}, Legal);
811
816
824
831
832
836 {MVT::v2i16, MVT::v2i32}, Expand);
837
838
844
849 if (STI.getPTXVersion() >= 43) {
854 }
855
860
861
863
864
867
868
871 ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM,
872 ISD::FMAXIMUM, ISD::FMINIMUM, ISD::FMAXIMUMNUM,
877
878
879
880 if (STI.allowFP16Math() || STI.hasBF16Math())
882
883
884
887 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
889 ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM},
891 }
892 }
893
894
895
896
897
898
899
901 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
902 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
903 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
904
905 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
909 STI.hasF32x2Instructions() ? Legal : Expand);
910 }
911
912
914 for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
915 if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) {
917 }
918 }
919 }
920
921
922 const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
923 STI.getPTXVersion() >= 60 &&
924 STI.allowFP16Math();
925 for (const auto &VT : {MVT::f16, MVT::v2f16})
927 IsFP16FP16x2NegAvailable ? Legal : Expand);
928
929 setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);
930 setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);
932
933
934
935 for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
936 ISD::FROUNDEVEN, ISD::FTRUNC}) {
943 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
946 }
947
948 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {
950 }
951 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
952 for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
955 }
956 }
957
958
960
962
963
964
965 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
966 for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
970 }
974 }
975
983
984
991
992
993
994
995 for (const auto &Op :
996 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FTANH}) {
999
1002 }
1006 }
1008
1011 if (STI.getPTXVersion() >= 65) {
1012 setFP16OperationAction(ISD::FABS, MVT::f16, Legal, Promote);
1013 setFP16OperationAction(ISD::FABS, MVT::v2f16, Legal, Expand);
1014 } else {
1017 }
1018 setBF16OperationAction(ISD::FABS, MVT::v2bf16, Legal, Expand);
1019 setBF16OperationAction(ISD::FABS, MVT::bf16, Legal, Promote);
1022
1023 for (const auto &Op :
1024 {ISD::FMINNUM, ISD::FMAXNUM, ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM}) {
1027 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
1028 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1029 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1030 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
1034 }
1035 bool SupportsF32MinMaxNaN =
1036 STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
1037 for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {
1039 setFP16OperationAction(Op, MVT::f16, Legal, Expand);
1040 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1041 setBF16OperationAction(Op, MVT::bf16, Legal, Expand);
1042 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1044 }
1045
1046
1049
1050
1051
1052
1053
1054
1057 setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote);
1058 setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);
1059 setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);
1060 setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand);
1061
1062
1063
1068 setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32},
1070 }
1071
1073
1075
1076
1077
1078 setOperationAction({ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP}, MVT::i128,
1080
1081
1082
1084
1085
1089
1090
1092 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1093 MVT::v32i32, MVT::v64i32, MVT::v128i32},
1095
1096
1098 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1099 MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other},
1101
1102
1103
1104
1105
1106
1108 {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);
1109
1110
1113}
1114
1122
1124 int Enabled, int &ExtraSteps,
1125 bool &UseOneConst,
1126 bool Reciprocal) const {
1130
1132 ExtraSteps = 0;
1133
1137
1138 auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
1141 };
1142
1143
1144
1145
1146
1147 if (Reciprocal || ExtraSteps > 0) {
1148 if (VT == MVT::f32)
1149 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
1150 : Intrinsic::nvvm_rsqrt_approx_f);
1151 else if (VT == MVT::f64)
1152 return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
1153 else
1155 } else {
1156 if (VT == MVT::f32)
1157 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
1158 : Intrinsic::nvvm_sqrt_approx_f);
1159 else {
1160
1161
1162
1163
1166 DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
1167 MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
1168 }
1169 }
1170}
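// Plain-C++ sketch of the approximation strategy above, with stand-ins for
// the NVVM intrinsics (names and bodies here are illustrative only): f32 has
// direct sqrt.approx/rsqrt.approx instructions, but f64 has no approximate
// sqrt, so an approximate f64 sqrt is composed as rcp(rsqrt(x)),
// i.e. 1 / (1/sqrt(x)).
static double approxRsqrtF64(double X) { return 1.0 / __builtin_sqrt(X); } // stand-in
static double approxRcpF64(double X) { return 1.0 / X; }                   // stand-in
static double approxSqrtF64(double X) {
  return approxRcpF64(approxRsqrtF64(X)); // how the f64 sqrt estimate is formed
}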
1171
1175 std::optional FirstVAArg, const CallBase &CB,
1176 unsigned UniqueCallSite) const {
1178
1179 std::string Prototype;
1181 O << "prototype_" << UniqueCallSite << " : .callprototype ";
1182
1184 O << "()";
1185 } else {
1186 O << "(";
1188 const Align RetAlign = getArgumentAlignment(&CB, RetTy, 0, DL);
1189 O << ".param .align " << RetAlign.value() << " .b8 _["
1190 << DL.getTypeAllocSize(RetTy) << "]";
1192 unsigned size = 0;
1194 size = ITy->getBitWidth();
1195 } else {
1197 "Floating point type expected here");
1199 }
1200
1201
1202
1204
1205 O << ".param .b" << size << " _";
1207 O << ".param .b" << PtrVT.getSizeInBits() << " _";
1208 } else {
1210 }
1211 O << ") ";
1212 }
1213 O << "_ (";
1214
1215 bool first = true;
1216
1217 const unsigned NumArgs = FirstVAArg.value_or(Args.size());
1218 auto AllOuts = ArrayRef(Outs);
1219 for (const unsigned I : llvm::seq(NumArgs)) {
1220 const auto ArgOuts =
1221 AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });
1222 AllOuts = AllOuts.drop_front(ArgOuts.size());
1223
1225 if (!first) {
1226 O << ", ";
1227 }
1228 first = false;
1229
1230 if (ArgOuts[0].Flags.isByVal()) {
1231
1232
1233 Type *ETy = Args[I].IndirectType;
1234 Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1235 Align ParamByValAlign =
1237
1238 O << ".param .align " << ParamByValAlign.value() << " .b8 _["
1239 << ArgOuts[0].Flags.getByValSize() << "]";
1240 } else {
1242 Align ParamAlign =
1243 getArgumentAlignment(&CB, Ty, I + AttributeList::FirstArgIndex, DL);
1244 O << ".param .align " << ParamAlign.value() << " .b8 _["
1245 << DL.getTypeAllocSize(Ty) << "]";
1246 continue;
1247 }
1248
1250 (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&
1251 "type mismatch between callee prototype and arguments");
1252
1253 unsigned sz = 0;
1257 sz = PtrVT.getSizeInBits();
1258 } else {
1259 sz = Ty->getPrimitiveSizeInBits();
1260 }
1261 O << ".param .b" << sz << " _";
1262 }
1263 }
1264
1265 if (FirstVAArg)
1266 O << (first ? "" : ",") << " .param .align "
1267 << STI.getMaxRequiredAlignment() << " .b8 _[]";
1268 O << ")";
1270 O << " .noreturn";
1271 O << ";";
1272
1273 return Prototype;
1274}
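// Worked example of the string built above (illustrative; the exact sizes and
// alignments depend on the callee signature and data layout). For an indirect
// call site returning an i32 and taking an i32 plus an 8-byte byval struct,
// the emitted prototype looks roughly like:
//   prototype_0 : .callprototype (.param .b32 _) _ (.param .b32 _,
//       .param .align 4 .b8 _[8]);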
1275
1280
1281Align NVPTXTargetLowering::getArgumentAlignment(const CallBase *CB, Type *Ty,
1282 unsigned Idx,
1284 if (!CB) {
1285
1286 return DL.getABITypeAlign(Ty);
1287 }
1288
1290
1291 if (!DirectCallee) {
1292
1293
1294
1295
1297
1299 return StackAlign.value();
1300 }
1302 }
1303
1304
1305
1306 if (DirectCallee)
1308
1309
1310 return DL.getABITypeAlign(Ty);
1311}
1312
1315 if (!Func)
1316 return false;
1318 return CB->getFunctionType() != CalleeFunc->getFunctionType();
1319 return false;
1320}
1321
1329
1331 }
1332
1333
1334
1335 if (Ptr->getOpcode() == ISD::ADDRSPACECAST) {
1340 }
1341 }
1342
1344}
1345
1347 if (Flags.isSExt())
1349 if (Flags.isZExt())
1352}
1353
1357 const EVT ActualVT = V.getValueType();
1358 assert((ActualVT == ExpectedVT ||
1360 "Non-integer argument type size mismatch");
1361 if (ExpectedVT.bitsGT(ActualVT))
1363 if (ExpectedVT.bitsLT(ActualVT))
1365
1366 return V;
1367}
1368
1371
1372 if (CLI.IsVarArg && (STI.getPTXVersion() < 60 || STI.getSmVersion() < 30))
1374 "Support for variadic functions (unsized array parameter) introduced "
1375 "in PTX ISA version 6.0 and requires target sm_30.");
1376
1386
1387 const auto GetI32 = [&](const unsigned I) {
1389 };
1390
1391 const unsigned UniqueCallSite = GlobalUniqueCallSite++;
1393 const SDValue StartChain =
1396
1398
1399 const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {
1400
1401
1404 DAG.getNode(NVPTXISD::DeclareScalarParam, dl, {MVT::Other, MVT::Glue},
1405 {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});
1407 DeclareGlue = Declare.getValue(1);
1408 return Declare;
1409 };
1410
1411 const auto MakeDeclareArrayParam = [&](SDValue Symbol, Align Align,
1412 unsigned Size) {
1414 NVPTXISD::DeclareArrayParam, dl, {MVT::Other, MVT::Glue},
1415 {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});
1417 DeclareGlue = Declare.getValue(1);
1418 return Declare;
1419 };
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1437 "Non-VarArg function with extra arguments");
1438
1439 const unsigned FirstVAArg = CLI.NumFixedArgs;
1440 unsigned VAOffset = 0;
1441
1442 const SDValue VADeclareParam =
1443 CLI.Args.size() > FirstVAArg
1444 ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),
1445 Align(STI.getMaxRequiredAlignment()), 0)
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1459 assert(AllOuts.size() == AllOutVals.size() &&
1460 "Outs and OutVals must be the same size");
1461
1462
1464 const auto ArgI = E.index();
1465 const auto Arg = E.value();
1466 const auto ArgOuts =
1467 AllOuts.take_while([&](auto O) { return O.OrigArgIndex == ArgI; });
1468 const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());
1469 AllOuts = AllOuts.drop_front(ArgOuts.size());
1470 AllOutVals = AllOutVals.drop_front(ArgOuts.size());
1471
1472 const bool IsVAArg = (ArgI >= FirstVAArg);
1473 const bool IsByVal = Arg.IsByVal;
1474
1475 const SDValue ParamSymbol =
1476 getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);
1477
1478 assert((!IsByVal || Arg.IndirectType) &&
1479 "byval arg must have indirect type");
1480 Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);
1481
1482 const Align ArgAlign = [&]() {
1483 if (IsByVal) {
1484
1485
1486
1487 const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1489 InitialAlign, DL);
1490 }
1491 return getArgumentAlignment(CB, Arg.Ty, ArgI + 1, DL);
1492 }();
1493
1494 const unsigned TySize = DL.getTypeAllocSize(ETy);
1495 assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&
1496 "type size mismatch");
1497
1498 const SDValue ArgDeclare = [&]() {
1499 if (IsVAArg)
1500 return VADeclareParam;
1501
1503 return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);
1504
1505 assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");
1506 assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&
1507 "Only int and float types are supported as non-array arguments");
1508
1509 return MakeDeclareScalarParam(ParamSymbol, TySize);
1510 }();
1511
1512 if (IsByVal) {
1513 assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");
1514 SDValue SrcPtr = ArgOutVals[0];
1515 const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);
1516 const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1517
1518 if (IsVAArg)
1519 VAOffset = alignTo(VAOffset, ArgAlign);
1520
1524
1525 unsigned J = 0;
1527 for (const unsigned NumElts : VI) {
1532 DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);
1533
1534 TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);
1539 DAG.getStore(ArgDeclare, dl, SrcLoad, ParamAddr,
1541 CallPrereqs.push_back(StoreParam);
1542
1543 J += NumElts;
1544 }
1545 if (IsVAArg)
1546 VAOffset += TySize;
1547 } else {
1551 VAOffset);
1552 assert(VTs.size() == Offsets.size() && "Size mismatch");
1553 assert(VTs.size() == ArgOuts.size() && "Size mismatch");
1554
1555
1556
1557
1558
1559 const bool ExtendIntegerParam =
1560 Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;
1561
1562 const auto GetStoredValue = [&](const unsigned I) {
1563 SDValue StVal = ArgOutVals[I];
1566 "OutVal type should always be legal");
1567
1569 const EVT StoreVT =
1570 ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1571
1572 return correctParamType(StVal, StoreVT, ArgOuts[I].Flags, DAG, dl);
1573 };
1574
1575 unsigned J = 0;
1577 for (const unsigned NumElts : VI) {
1579
1581 if (IsVAArg) {
1582
1583
1584 assert(NumElts == 1 &&
1585 "Vectorization should be disabled for vaargs.");
1586
1587
1590
1591 const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
1592 VAOffset += DL.getTypeAllocSize(TheStoreType.getTypeForEVT(Ctx));
1593 } else {
1594 assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");
1595 Offset = Offsets[J];
1596 }
1597
1600
1601 const MaybeAlign CurrentAlign = ExtendIntegerParam
1604
1607 return GetStoredValue(J + K);
1608 });
1609
1611 DAG.getStore(ArgDeclare, dl, Val, Ptr,
1613 CallPrereqs.push_back(StoreParam);
1614
1615 J += NumElts;
1616 }
1617 }
1618 }
1619
1620
1621 if (!Ins.empty()) {
1623 const unsigned ResultSize = DL.getTypeAllocSize(RetTy);
1625 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1626 MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);
1627 } else {
1628 MakeDeclareScalarParam(RetSymbol, ResultSize);
1629 }
1630 }
1631
1632
1633
1634 if (VADeclareParam) {
1637 VADeclareParam.getOperand(2), GetI32(VAOffset),
1640 VADeclareParam->getVTList(), DeclareParamOps);
1641 }
1642
1644
1645
1647
1648
1649
1650
1651 const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;
1652
1654 Function* CalleeFunc = nullptr;
1655
1656
1658 assert(CalleeFunc != nullptr && "Libcall callee must be set.");
1659
1660
1661
1662 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
1663 }
1664
1665 if (IsIndirectCall) {
1666
1667
1668
1669
1670
1671
1672
1674 std::string Proto =
1676 HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,
1677 UniqueCallSite);
1678 const char *ProtoStr = nvTM->getStrPool().save(Proto).data();
1680 NVPTXISD::CallPrototype, dl, MVT::Other,
1682 CallPrereqs.push_back(PrototypeDeclare);
1683 }
1684
1685 const unsigned Proto = IsIndirectCall ? UniqueCallSite : 0;
1686 const unsigned NumArgs =
1687 std::min(CLI.NumFixedArgs + 1, Args.size());
1688
1689
1692 NVPTXISD::CALL, dl, MVT::Other,
1693 {CallToken, GetI32(CLI.IsConvergent), GetI32(IsIndirectCall),
1694 GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});
1695
1698 if (!Ins.empty()) {
1702 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1703
1704 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1706
1707
1708
1709
1710 const bool ExtendIntegerRetVal =
1711 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
1712
1713 unsigned I = 0;
1715 for (const unsigned NumElts : VI) {
1717 ExtendIntegerRetVal ? MaybeAlign(std::nullopt)
1719
1721 const EVT LoadVT =
1722 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1726
1730
1731 LoadChains.push_back(R.getValue(1));
1732 for (const unsigned J : llvm::seq(NumElts))
1734 I += NumElts;
1735 }
1736 }
1737
1740 UniqueCallSite + 1, SDValue(), dl);
1741
1742
1743
1744
1747 DAG.getNode(NVPTXISD::ProxyReg, dl, Reg.getValueType(), {CallEnd, Reg});
1750 }
1751
1752
1753
1755 return CallEnd;
1756}
1757
1760
1761 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1763
1765 Fn,
1766 "Support for dynamic alloca introduced in PTX ISA version 7.3 and "
1767 "requires target sm_52.",
1770 Op.getOperand(0)};
1772 }
1773
1775 SDValue Chain = Op.getOperand(0);
1778
1779
1780
1783
1784
1786
1788 DAG.getNode(NVPTXISD::DYNAMIC_STACKALLOC, DL, {LocalVT, MVT::Other},
1791
1794
1796}
1797
1801 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1803
1805 Fn,
1806 "Support for stackrestore requires PTX ISA version >= 7.3 and target "
1807 ">= sm_52.",
1808 DL.getDebugLoc()));
1809 return Op.getOperand(0);
1810 }
1811
1813 SDValue Chain = Op.getOperand(0);
1817 return DAG.getNode(NVPTXISD::STACKRESTORE, DL, MVT::Other, {Chain, ASC});
1818}
1819
1823 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1825
1827 Fn,
1828 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "
1829 "sm_52.",
1830 DL.getDebugLoc()));
1833 }
1834
1836 SDValue Chain = Op.getOperand(0);
1838 DAG.getNode(NVPTXISD::STACKSAVE, DL, {LocalVT, MVT::Other}, Chain);
1842}
1843
1844
1845
1846
1852 unsigned NumOperands = Node->getNumOperands();
1853 for (unsigned i = 0; i < NumOperands; ++i) {
1855 EVT VVT = SubOp.getNode()->getValueType(0);
1858 for (unsigned j = 0; j < NumSubElem; ++j) {
1861 }
1862 }
1864}
1865
1869 assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&
1870 Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");
1871 return DAG.getNode(NVPTXISD::PRMT, DL, MVT::i32,
1873}
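// Sketch of PTX prmt.b32 default-mode semantics, to make the selector
// constants used in this file easier to read; plain C++, helper name is
// illustrative. The two sources form an 8-byte pool (A supplies bytes 0-3,
// B bytes 4-7); each result byte is picked by one selector nibble, and a
// nibble with bit 3 set replicates the selected byte's sign bit instead.
static unsigned prmtDefault(unsigned A, unsigned B, unsigned Selector) {
  unsigned char Pool[8];
  for (int I = 0; I < 4; ++I) {
    Pool[I] = (A >> (8 * I)) & 0xff;
    Pool[I + 4] = (B >> (8 * I)) & 0xff;
  }
  unsigned Result = 0;
  for (int I = 0; I < 4; ++I) {
    const unsigned Nibble = (Selector >> (4 * I)) & 0xf;
    unsigned char Byte = Pool[Nibble & 0x7];
    if (Nibble & 0x8) // sign-replication mode for this byte
      Byte = (Byte & 0x80) ? 0xff : 0x00;
    Result |= unsigned(Byte) << (8 * I);
  }
  return Result;
}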
1874
1880
1881
1882
1883
1884
1885
1888 ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,
1890
1892
1893 unsigned OpIdx = 0;
1894 while (Level.size() > 1) {
1895
1896 const auto [Op, NumInputs] = Ops[OpIdx];
1897
1898
1900 unsigned I = 0, E = Level.size();
1901 for (; I + NumInputs <= E; I += NumInputs) {
1902
1905 }
1906
1908
1909
1910 if (ReducedLevel.empty()) {
1911
1912
1914 assert(OpIdx < Ops.size() && "no smaller operators for reduction");
1915 continue;
1916 }
1917
1918
1919
1920
1923 }
1924
1925
1926 Level = ReducedLevel;
1927 }
1928
1929 return *Level.begin();
1930}
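// Scalar sketch of the tree reduction built above: reduce each level with the
// widest available operator (a 3-input max here), switch to the 2-input form
// only once a level is too small for it, and carry leftovers into the next
// level. Plain C++, illustrative only; the real code also threads fast-math
// flags and reassociation checks.
static float fmax2(float A, float B) { return __builtin_fmaxf(A, B); }
static float fmax3(float A, float B, float C) { return fmax2(fmax2(A, B), C); }
static float reduceFMaxTree(float *Vals, unsigned N) {
  unsigned Arity = 3; // start with the widest operator
  while (N > 1) {
    unsigned Out = 0, I = 0;
    for (; I + Arity <= N; I += Arity)
      Vals[Out++] = Arity == 3 ? fmax3(Vals[I], Vals[I + 1], Vals[I + 2])
                               : fmax2(Vals[I], Vals[I + 1]);
    if (Out == 0) { // level too small for this operator: try the narrower one
      Arity = 2;
      continue;
    }
    while (I < N) // leftover values carry over to the next level
      Vals[Out++] = Vals[I++];
    N = Out;
  }
  return Vals[0];
}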
1931
1932
1934 switch (ReductionOpcode) {
1935 case ISD::VECREDUCE_FMAX:
1936 return ISD::FMAXNUM;
1937 case ISD::VECREDUCE_FMIN:
1938 return ISD::FMINNUM;
1939 case ISD::VECREDUCE_FMAXIMUM:
1940 return ISD::FMAXIMUM;
1941 case ISD::VECREDUCE_FMINIMUM:
1942 return ISD::FMINIMUM;
1943 default:
1945 }
1946}
1947
1948
1949static std::optional
1951 switch (ReductionOpcode) {
1952 case ISD::VECREDUCE_FMAX:
1953 return NVPTXISD::FMAXNUM3;
1954 case ISD::VECREDUCE_FMIN:
1955 return NVPTXISD::FMINNUM3;
1956 case ISD::VECREDUCE_FMAXIMUM:
1957 return NVPTXISD::FMAXIMUM3;
1958 case ISD::VECREDUCE_FMINIMUM:
1959 return NVPTXISD::FMINIMUM3;
1960 default:
1961 return std::nullopt;
1962 }
1963}
1964
1965
1966
1967
1971 const SDNodeFlags Flags = Op->getFlags();
1973
1974 const unsigned Opcode = Op->getOpcode();
1975 const EVT EltTy = Vector.getValueType().getVectorElementType();
1976
1977
1978 const bool CanUseMinMax3 =
1979 EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&
1980 STI.getPTXVersion() >= 88 &&
1981 (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN ||
1982 Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM);
1983
1984
1985
1986 SmallVector<std::pair<unsigned , unsigned >, 2> ScalarOps;
1987
1989 CanUseMinMax3 && Opcode3Elem)
1990 ScalarOps.push_back({*Opcode3Elem, 3});
1992
1995
1997}
1998
2000
2001
2002 EVT FromVT = Op->getOperand(0)->getValueType(0);
2003 if (FromVT != MVT::v2i8) {
2004 return Op;
2005 }
2006
2007
2018 {Extend0, DAG.getNode(ISD::SHL, DL, MVT::i16, {Extend1, Const8})});
2019 EVT ToVT = Op->getValueType(0);
2021}
2022
2023
2024
2025
2026
2029 EVT VT = Op->getValueType(0);
2031 return Op;
2033
2035 return Operand->isUndef() || isa(Operand) ||
2036 isa(Operand);
2037 })) {
2038 if (VT != MVT::v4i8)
2039 return Op;
2040
2041
2043 uint64_t SelectionValue) -> SDValue {
2046 if (Cast) {
2049 }
2050 return getPRMT(L, R, SelectionValue, DL, DAG);
2051 };
2052 auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
2053 auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
2054 auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);
2055 return DAG.getBitcast(VT, PRMT3210);
2056 }
2057
2058
2059 auto GetOperand = [](SDValue Op, int N) -> APInt {
2060 const SDValue &Operand = Op->getOperand(N);
2061 EVT VT = Op->getValueType(0);
2063 return APInt(32, 0);
2065 if (VT == MVT::v2f16 || VT == MVT::v2bf16)
2067 else if (VT == MVT::v2i16 || VT == MVT::v4i8)
2069 else
2071
2072
2073 if (VT == MVT::v4i8)
2075 return Value.zext(32);
2076 };
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091 APInt Value(32, 0);
2093 assert(32 % NumElements == 0 && "must evenly divide bit length");
2094 const unsigned ShiftAmount = 32 / NumElements;
2095 for (unsigned ElementNo : seq(NumElements))
2096 Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);
2098 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), Const);
2099}
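// Sketch of the constant-vector packing above: N equal-width lanes are folded
// into one 32-bit immediate, lane I occupying bits [I*32/N, (I+1)*32/N).
// Plain C++, illustrative only; lane values are assumed to already fit their
// slot (f16/bf16 lanes are their raw bit patterns).
static unsigned packLanes(const unsigned *Lanes, unsigned NumLanes) {
  const unsigned ShiftPerLane = 32 / NumLanes;
  unsigned Value = 0;
  for (unsigned I = 0; I < NumLanes; ++I)
    Value |= Lanes[I] << (I * ShiftPerLane);
  return Value;
}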
2100
2101SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2106 EVT VectorVT = Vector.getValueType();
2107
2108 if (VectorVT == MVT::v4i8) {
2115 SDNodeFlags Flags;
2119 return Ext;
2120 }
2121
2122
2124 return Op;
2125
2126
2130
2131 SDLoc dl(Op.getNode());
2138}
2139
2140SDValue NVPTXTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2143 EVT VectorVT = Vector.getValueType();
2144
2145 if (VectorVT != MVT::v4i8)
2146 return Op;
2149 if (Value->isUndef())
2151
2153
2155 DAG.getNode(NVPTXISD::BFI, DL, MVT::i32,
2161 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), BFI);
2162}
2163
2168 if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
2169 return Op;
2170
2171
2174 uint32_t Selector = 0;
2176 if (I.value() != -1)
2177 Selector |= (I.value() << (I.index() * 4));
2178 }
2179
2182 DAG.getBitcast(MVT::i32, V2), Selector, DL, DAG);
2183 return DAG.getBitcast(Op.getValueType(), PRMT);
2184}
2185
2186
2187
2188
2189
2190SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
2194
2195 EVT VT = Op.getValueType();
2197 SDLoc dl(Op);
2198 SDValue ShOpLo = Op.getOperand(0);
2199 SDValue ShOpHi = Op.getOperand(1);
2200 SDValue ShAmt = Op.getOperand(2);
2202
2203 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2204
2205
2206
2207
2208
2211 DAG.getNode(NVPTXISD::FSHR_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2212
2215 }
2216 else {
2217
2218
2219
2220
2221
2222
2223
2224
2227 ShAmt);
2234
2240
2243 }
2244}
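// Plain-C++ sketch of the generic two-register expansion used above for the
// arithmetic shift-right case: a 64-bit value held as {Hi, Lo} 32-bit halves
// is shifted by Amt in [0, 64). Illustrative only; assumes >> on a signed int
// is an arithmetic shift, as on all supported hosts.
static void shiftRightPartsArith(unsigned Hi, unsigned Lo, unsigned Amt,
                                 unsigned &OutHi, unsigned &OutLo) {
  if (Amt == 0) {
    OutHi = Hi;
    OutLo = Lo;
  } else if (Amt < 32) {
    OutLo = (Lo >> Amt) | (Hi << (32 - Amt)); // funnel the low word
    OutHi = (unsigned)((int)Hi >> Amt);
  } else {
    OutLo = (unsigned)((int)Hi >> (Amt - 32));
    OutHi = (unsigned)((int)Hi >> 31); // fill with sign bits
  }
}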
2245
2246
2247
2248
2249
2250
2255
2256 EVT VT = Op.getValueType();
2258 SDLoc dl(Op);
2259 SDValue ShOpLo = Op.getOperand(0);
2260 SDValue ShOpHi = Op.getOperand(1);
2261 SDValue ShAmt = Op.getOperand(2);
2262
2263 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2264
2265
2266
2267
2268
2270 DAG.getNode(NVPTXISD::FSHL_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2272
2275 }
2276 else {
2277
2278
2279
2280
2281
2282
2283
2284
2287 ShAmt);
2294
2300
2303 }
2304}
2305
2306
2307
2310 EVT VT = Op.getValueType();
2312
2316
2317 if (!SrcVT.bitsEq(VT))
2319
2320 return DAG.getNode(NVPTXISD::FCOPYSIGN, DL, VT, In1, In2);
2321}
2322
2324 EVT VT = Op.getValueType();
2325
2326 if (VT == MVT::f32)
2327 return LowerFROUND32(Op, DAG);
2328
2329 if (VT == MVT::f64)
2330 return LowerFROUND64(Op, DAG);
2331
2333}
2334
2335
2336
2337
2338
2339
2340
2341
2344 SDLoc SL(Op);
2346 EVT VT = Op.getValueType();
2347
2349
2350
2352 const unsigned SignBitMask = 0x80000000;
2354 DAG.getConstant(SignBitMask, SL, MVT::i32));
2355 const unsigned PointFiveInBits = 0x3F000000;
2356 SDValue PointFiveWithSignRaw =
2358 DAG.getConstant(PointFiveInBits, SL, MVT::i32));
2359 SDValue PointFiveWithSign =
2360 DAG.getNode(ISD::BITCAST, SL, VT, PointFiveWithSignRaw);
2362 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2363
2364
2370
2371
2374 SDValue RoundedAForSmallA = DAG.getNode(ISD::FTRUNC, SL, VT, A);
2375 return DAG.getNode(ISD::SELECT, SL, VT, IsSmall, RoundedAForSmallA, RoundedA);
2376}
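// Plain-C++ sketch of the FROUND lowering above: round-half-away-from-zero is
// trunc(x + copysign(0.5, x)), with two guards. Values with |x| > 2^23 are
// already integral, so the biased add must be bypassed; values with |x| < 0.5
// must round to a signed zero rather than risk the add rounding up to 1.
// Illustrative only.
static float roundHalfAwayFromZero(float X) {
  float R = __builtin_truncf(X + __builtin_copysignf(0.5f, X));
  if (__builtin_fabsf(X) > 8388608.0f) // 2^23: already an integer
    R = X;
  if (__builtin_fabsf(X) < 0.5f)       // keep +/-0 with X's sign
    R = __builtin_truncf(X);
  return R;
}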
2377
2378
2379
2380
2381
2382
2385 SDLoc SL(Op);
2387 EVT VT = Op.getValueType();
2388
2390
2391
2394 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2395
2396
2402 RoundedA);
2403
2404
2406 DAG.getNode(ISD::FTRUNC, SL, VT, A);
2407
2408
2413}
2414
2416 EVT VT = N->getValueType(0);
2417 EVT NVT = MVT::f32;
2420 }
2424 SDValue Res = DAG.getNode(N->getOpcode(), DL, NVT, Tmp0, Tmp1, N->getFlags());
2426}
2427
2428SDValue NVPTXTargetLowering::PromoteBinOpIfF32FTZ(SDValue Op,
2432 }
2433 return Op;
2434}
2435
2438 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2439
2440 if (Op.getValueType() == MVT::bf16) {
2441 SDLoc Loc(Op);
2444 DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
2446 }
2447
2448
2449 return Op;
2450}
2451
2454 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2455
2456 if (Op.getOperand(0).getValueType() == MVT::bf16) {
2457 SDLoc Loc(Op);
2459 Op.getOpcode(), Loc, Op.getValueType(),
2460 DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, Op.getOperand(0)));
2461 }
2462
2463
2464 return Op;
2465}
2466
2469 EVT NarrowVT = Op.getValueType();
2470 SDValue Wide = Op.getOperand(0);
2473 const TargetLowering *TLI = STI.getTargetLowering();
2474 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 70) {
2476 }
2477 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
2478
2479 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70) {
2481 return Op;
2482 }
2484 SDLoc Loc(Op);
2485
2486
2489 : MVT::f32,
2490 Wide, Loc, DAG);
2492 }
2493 }
2495 }
2496 }
2497
2498
2499 return Op;
2500}
2501
2504 SDValue Narrow = Op.getOperand(0);
2506 EVT WideVT = Op.getValueType();
2509 (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71)) {
2510 SDLoc Loc(Op);
2511 return DAG.getNode(ISD::BF16_TO_FP, Loc, WideVT, Narrow);
2512 }
2514 (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {
2516 : MVT::f32;
2517 SDLoc Loc(Op);
2518 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {
2519 Op = DAG.getNode(ISD::FP_EXTEND, Loc, F32, Narrow);
2520 } else {
2521 Op = DAG.getNode(ISD::BF16_TO_FP, Loc, F32, Narrow);
2522 }
2523 return DAG.getNode(ISD::FP_EXTEND, Loc, WideVT, Op);
2524 }
2525 }
2526
2527
2528 return Op;
2529}
2530
2533 if (Op.getValueType() != MVT::v2i16)
2534 return Op;
2535 EVT EltVT = Op.getValueType().getVectorElementType();
2537 for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
2540 [&](const SDUse &O) {
2541 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
2542 O.get(), DAG.getIntPtrConstant(I, DL));
2543 });
2545 }
2548 return V;
2549}
2550
2555
2556
2557 for (size_t I = 0; I < N->getNumOperands(); I++) {
2565 } else
2566 Ops.push_back(Val);
2567 }
2568
2573
2574 return Tcgen05StNode;
2575}
2576
2580 EVT VT = Op.getValueType();
2581
2583 case MVT::i16: {
2588 }
2589 case MVT::i32: {
2591 }
2592 case MVT::v2i16: {
2596 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i16, Swapped);
2597 }
2598 case MVT::i64: {
2603 DL, DAG);
2606 DL, DAG);
2607 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64,
2608 {SwappedHigh, SwappedLow});
2609 }
2610 default:
2612 }
2613}
2614
2616 switch (IID) {
2617 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2618 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1;
2619 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2620 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2;
2621 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2622 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2623 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2624 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2625 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2626 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2627 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2628 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2629 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2630 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2631 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2632 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2633 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2634 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2635 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2636 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2637 case Intrinsic::
2638 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2639 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2640 case Intrinsic::
2641 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2642 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2643 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2644 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1;
2645 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2646 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2;
2647 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2648 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2649 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2650 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2651 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2652 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2653 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2654 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2655 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2656 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2657 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2658 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2659 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2660 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2661 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2662 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2663 case Intrinsic::
2664 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2665 return NVPTXISD::
2666 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2667 case Intrinsic::
2668 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2669 return NVPTXISD::
2670 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2671 };
2672 llvm_unreachable("unhandled tcgen05.mma.disable_output_lane intrinsic");
2673}
2674
2679
2681
2682 for (size_t I = 0; I < N->getNumOperands(); I++) {
2683 if (I == 1)
2684 continue;
2692 } else
2693 Ops.push_back(Val);
2694 }
2695
2700
2701 return Tcgen05MMANode;
2702}
2703
2704
2705static std::optional<std::pair<SDValue, SDValue>>
2708 EVT ResVT = N->getValueType(0);
2710 return {};
2711
2713
2714
2716 for (unsigned i = 0; i < NumElts; ++i)
2718
2719 ListVTs.push_back(N->getValueType(1));
2720
2722
2724 N->getOperand(2)};
2725
2726 if (HasOffset) {
2727 Ops.push_back(N->getOperand(3));
2728 Ops.push_back(N->getOperand(4));
2729 } else
2730 Ops.push_back(N->getOperand(3));
2731
2736
2737
2739 for (unsigned i = 0; i < NumElts; ++i) {
2742 }
2743
2746 return {{BuildVector, Chain}};
2747}
2748
2751 SDValue Intrin = N->getOperand(1);
2752
2753
2755 switch (IntrinNo) {
2756 default:
2757 break;
2758 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
2759 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
2760 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
2761 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
2762 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
2763 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
2764 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
2765 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
2766 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
2767 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
2768 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
2769 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
2770 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
2771 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
2772 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
2773 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
2774 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
2775 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
2776 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
2777 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
2778 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1:
2779 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:
2780 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:
2781 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:
2782 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:
2783 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:
2784 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:
2785 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:
2786 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
2787 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
2788 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
2789 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
2790 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
2791 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
2792 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
2793 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
2794 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
2796 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2797 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2798 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2799 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2800 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2801 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2802 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2803 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2804 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2805 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2806 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2807 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2808 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2809 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2810 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2811 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2812 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2813 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2814 case Intrinsic::
2815 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2816 case Intrinsic::
2817 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2818 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2819 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2820 case Intrinsic::
2821 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2822 case Intrinsic::
2823 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2825 }
2826 return Op;
2827}
2828
2831
2833 if (N->getOperand(1).getValueType() != MVT::i128) {
2834
2836 }
2837
2838 unsigned IID =
2840 auto Opcode = [&]() {
2841 switch (IID) {
2842 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2843 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED;
2844 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2845 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X;
2846 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2847 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y;
2848 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2849 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z;
2850 default:
2852 }
2853 }();
2854
2856 SDValue TryCancelResponse = N->getOperand(1);
2857 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TryCancelResponse);
2858 SDValue TryCancelResponse0 =
2861 SDValue TryCancelResponse1 =
2864
2865 return DAG.getNode(Opcode, DL, N->getVTList(),
2866 {TryCancelResponse0, TryCancelResponse1});
2867}
2868
2872 SDValue F32Vec = N->getOperand(1);
2873 SDValue RBits = N->getOperand(2);
2874
2875 unsigned IntrinsicID = N->getConstantOperandVal(0);
2876
2877
2879 for (unsigned i = 0; i < 4; ++i)
2882
2884
2885 auto [OpCode, RetTy, CvtModeFlag] =
2886 [&]() -> std::tuple<unsigned, MVT::SimpleValueType, uint32_t> {
2887 switch (IntrinsicID) {
2888 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
2889 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8,
2890 CvtMode::RS | CvtMode::RELU_FLAG};
2891 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
2892 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2893 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
2894 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8,
2895 CvtMode::RS | CvtMode::RELU_FLAG};
2896 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
2897 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2898 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
2899 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8,
2900 CvtMode::RS | CvtMode::RELU_FLAG};
2901 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
2902 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2903 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
2904 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8,
2905 CvtMode::RS | CvtMode::RELU_FLAG};
2906 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
2907 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2908 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
2909 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16,
2910 CvtMode::RS | CvtMode::RELU_FLAG};
2911 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
2912 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS};
2913 default:
2915 }
2916 }();
2917
2918 Ops.push_back(RBits);
2920
2922}
2923
2925 const unsigned Mode = [&]() {
2926 switch (Op->getConstantOperandVal(0)) {
2927 case Intrinsic::nvvm_prmt:
2929 case Intrinsic::nvvm_prmt_b4e:
2931 case Intrinsic::nvvm_prmt_ecl:
2933 case Intrinsic::nvvm_prmt_ecr:
2935 case Intrinsic::nvvm_prmt_f4e:
2937 case Intrinsic::nvvm_prmt_rc16:
2939 case Intrinsic::nvvm_prmt_rc8:
2941 default:
2943 }
2944 }();
2947 SDValue B = Op.getNumOperands() == 4 ? Op.getOperand(2)
2949 SDValue Selector = (Op->op_end() - 1)->get();
2951}
2952
2954 switch (Op->getConstantOperandVal(1)) {
2955 default:
2956 return Op;
2957
2958
2959
2960 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
2961 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
2962 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
2966
2967 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
2968 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG, true))
2971 }
2972}
2973
2975 switch (Op->getConstantOperandVal(0)) {
2976 default:
2977 return Op;
2978 case Intrinsic::nvvm_prmt:
2979 case Intrinsic::nvvm_prmt_b4e:
2980 case Intrinsic::nvvm_prmt_ecl:
2981 case Intrinsic::nvvm_prmt_ecr:
2982 case Intrinsic::nvvm_prmt_f4e:
2983 case Intrinsic::nvvm_prmt_rc16:
2984 case Intrinsic::nvvm_prmt_rc8:
2986 case Intrinsic::nvvm_internal_addrspace_wrap:
2987 return Op.getOperand(1);
2988 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2989 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2990 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2991 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2993 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
2994 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
2995 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
2996 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
2997 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
2998 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
2999 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
3000 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
3001 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
3002 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
3004 }
3005}
3006
3007
3008
3009
3012 assert(V.getValueType() == MVT::i64 &&
3013 "Unexpected CTLZ/CTPOP type to legalize");
3014
3018}
3019
3022 assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);
3023
3025 if (!AmtConst)
3027 const auto Amt = AmtConst->getZExtValue() & 63;
3028
3033
3034
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053 auto [High, Mid, Low] = ((Opcode == ISD::FSHL) == (Amt < 32))
3054 ? std::make_tuple(AHi, ALo, BHi)
3055 : std::make_tuple(ALo, BHi, BLo);
3056
3060
3061 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64, {RLo, RHi});
3062}
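// Plain-C++ sketch of the 64-bit funnel-shift expansion above for the FSHL
// case with a constant amount: both 32-bit halves of the result are 32-bit
// funnel shifts over a (High, Mid, Low) window, and the window slides down
// one word when the amount crosses 32. Illustrative only; FSHR is the mirror
// image.
static unsigned fshl32(unsigned Hi, unsigned Lo, unsigned Amt) {
  Amt &= 31;
  return Amt ? (Hi << Amt) | (Lo >> (32 - Amt)) : Hi;
}
static unsigned long long fshl64(unsigned long long A, unsigned long long B,
                                 unsigned Amt) {
  Amt &= 63;
  const unsigned AHi = (unsigned)(A >> 32), ALo = (unsigned)A;
  const unsigned BHi = (unsigned)(B >> 32), BLo = (unsigned)B;
  const unsigned Hi = Amt < 32 ? AHi : ALo;
  const unsigned Mid = Amt < 32 ? ALo : BHi;
  const unsigned Lo = Amt < 32 ? BHi : BLo;
  const unsigned ResHi = fshl32(Hi, Mid, Amt);
  const unsigned ResLo = fshl32(Mid, Lo, Amt);
  return ((unsigned long long)ResHi << 32) | ResLo;
}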
3063
3068
3074
3076
3077
3078
3082 EVT Ty = Op.getValueType();
3084
3086 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, Ty, Div, Flags);
3091
3092 if (Flags.hasNoInfs())
3093 return Sub;
3094
3095
3101}
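// Plain-C++ sketch of the FREM lowering above: frem(x, y) == x - trunc(x/y)*y.
// When y is infinite (and infs are not excluded by fast-math flags), the
// arithmetic form degenerates to 0*inf = NaN, so a final select restores x,
// which is the correct result for finite x. Illustrative only.
static double fremViaTrunc(double X, double Y) {
  const double Q = __builtin_trunc(X / Y);
  const double R = X - Q * Y; // the no-infs fast path stops here
  return __builtin_isinf(Y) ? X : R;
}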
3102
3104 assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
3105
3107 SDValue TrueVal = Op->getOperand(1);
3108 SDValue FalseVal = Op->getOperand(2);
3110
3111
3114 TrueVal = TrueVal.getOperand(0);
3115 FalseVal = FalseVal.getOperand(0);
3116
3117 EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())
3118 ? TrueVal.getValueType()
3119 : FalseVal.getValueType();
3124 }
3125
3126
3127
3128 TrueVal = DAG.getFreeze(TrueVal);
3129 FalseVal = DAG.getFreeze(FalseVal);
3134 return Or;
3135}
3136
3139
3140 SDValue Chain = N->getOperand(0);
3141 SDValue Val = N->getOperand(1);
3142 SDValue BasePtr = N->getOperand(2);
3144 SDValue Mask = N->getOperand(4);
3145
3149 assert(ValVT.isVector() && "Masked vector store must have vector type");
3151 "Unexpected alignment for masked store");
3152
3153 unsigned Opcode = 0;
3155 default:
3157 case MVT::v4i64:
3158 case MVT::v4f64: {
3160 break;
3161 }
3162 case MVT::v8i32:
3163 case MVT::v8f32: {
3165 break;
3166 }
3167 }
3168
3170
3171
3172 Ops.push_back(Chain);
3173
3174
3175
3176 assert(Mask.getValueType().isVector() &&
3177 Mask.getValueType().getVectorElementType() == MVT::i1 &&
3178 "Mask must be a vector of i1");
3180 "Mask expected to be a BUILD_VECTOR");
3181 assert(Mask.getValueType().getVectorNumElements() ==
3183 "Mask size must be the same as the vector size");
3184 for (auto [I, Op] : enumerate(Mask->ops())) {
3185
3186 if (Op.getNode()->getAsZExtVal() == 0) {
3187
3188
3191 } else {
3192
3196 Ops.push_back(ExtVal);
3197 }
3198 }
3199
3200
3201 Ops.push_back(BasePtr);
3202
3203
3204
3205
3207 "Offset operand expected to be undef");
3209
3213
3214 return NewSt;
3215}
3216
3219 switch (Op.getOpcode()) {
3224 case ISD::ADDRSPACECAST:
3225 return LowerADDRSPACECAST(Op, DAG);
3233 return LowerBUILD_VECTOR(Op, DAG);
3234 case ISD::BITCAST:
3235 return LowerBITCAST(Op, DAG);
3237 return Op;
3239 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3241 return LowerINSERT_VECTOR_ELT(Op, DAG);
3243 return LowerVECTOR_SHUFFLE(Op, DAG);
3245 return LowerCONCAT_VECTORS(Op, DAG);
3246 case ISD::VECREDUCE_FMAX:
3247 case ISD::VECREDUCE_FMIN:
3248 case ISD::VECREDUCE_FMAXIMUM:
3249 case ISD::VECREDUCE_FMINIMUM:
3250 return LowerVECREDUCE(Op, DAG);
3251 case ISD::STORE:
3252 return LowerSTORE(Op, DAG);
3253 case ISD::MSTORE: {
3254 assert(STI.has256BitVectorLoadStore(
3256 "Masked store vector not supported on subtarget.");
3258 }
3259 case ISD::LOAD:
3260 return LowerLOAD(Op, DAG);
3261 case ISD::MLOAD:
3262 return LowerMLOAD(Op, DAG);
3264 return LowerShiftLeftParts(Op, DAG);
3267 return LowerShiftRightParts(Op, DAG);
3270 case ISD::FROUND:
3271 return LowerFROUND(Op, DAG);
3273 return LowerFCOPYSIGN(Op, DAG);
3276 return LowerINT_TO_FP(Op, DAG);
3279 return LowerFP_TO_INT(Op, DAG);
3281 return LowerFP_ROUND(Op, DAG);
3282 case ISD::FP_EXTEND:
3283 return LowerFP_EXTEND(Op, DAG);
3284 case ISD::BR_JT:
3285 return LowerBR_JT(Op, DAG);
3286 case ISD::VAARG:
3287 return LowerVAARG(Op, DAG);
3288 case ISD::VASTART:
3289 return LowerVASTART(Op, DAG);
3308 case ISD::DYNAMIC_STACKALLOC:
3310 case ISD::STACKRESTORE:
3312 case ISD::STACKSAVE:
3315 return LowerCopyToReg_128(Op, DAG);
3319
3320 return PromoteBinOpIfF32FTZ(Op, DAG);
3328 default:
3329 llvm_unreachable("Custom lowering not defined for operation");
3330 }
3331}
3332
3335 SDValue Chain = Op.getOperand(0);
3337 SDValue Index = Op.getOperand(2);
3338
3339 unsigned JId = JT->getIndex();
3342
3344
3345
3347 Chain = DAG.getNode(NVPTXISD::BrxStart, DL, VTs, Chain, IdV);
3348
3349
3352 Chain = DAG.getNode(NVPTXISD::BrxItem, DL, VTs, Chain.getValue(0),
3354
3355
3358 SDValue BrxEnd = DAG.getNode(NVPTXISD::BrxEnd, DL, MVT::Other, EndOps);
3359
3360 return BrxEnd;
3361}
3362
3363
3367
3371 unsigned SrcAS = N->getSrcAddressSpace();
3372 unsigned DestAS = N->getDestAddressSpace();
3375
3376
3382 const MVT GenerictVT =
3386 SDValue SharedClusterConversion =
3389 return SharedClusterConversion;
3390 }
3391
3392 return DAG.getUNDEF(Op.getValueType());
3393 }
3394
3395 return Op;
3396}
3397
3398
3399
3401 const TargetLowering *TLI = STI.getTargetLowering();
3403
3404 SDNode *Node = Op.getNode();
3406 EVT VT = Node->getValueType(0);
3410 const MaybeAlign MA(Node->getConstantOperandVal(3));
3411
3413 Tmp1, Tmp2, MachinePointerInfo(V));
3414 SDValue VAList = VAListLoad;
3415
3420
3424 }
3425
3426
3430
3431
3433 MachinePointerInfo(V));
3434
3437
3438
3439 return DAG.getLoad(VT, DL, Tmp1, VAList, MachinePointerInfo(SrcV));
3440}
3441
3443 const TargetLowering *TLI = STI.getTargetLowering();
3446
3447
3448 SDValue VAReg = getParamSymbol(DAG, -1, PtrVT);
3449
3451 return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
3452 MachinePointerInfo(SV));
3453}
3454
3455static std::pair<MemSDNode *, uint32_t>
3458 SDValue Chain = N->getOperand(0);
3459 SDValue BasePtr = N->getOperand(1);
3460 SDValue Mask = N->getOperand(3);
3461 [[maybe_unused]] SDValue Passthru = N->getOperand(4);
3462
3464 EVT ResVT = N->getValueType(0);
3465 assert(ResVT.isVector() && "Masked vector load must have vector type");
3466
3467
3468
3471 "Passthru operand expected to be poison or undef");
3472
3473
3474
3477 assert(ElementSizeInBits % 8 == 0 && "Unexpected element size");
3478 uint32_t ElementSizeInBytes = ElementSizeInBits / 8;
3479 uint32_t ElementMask = (1u << ElementSizeInBytes) - 1u;
3480
3482
3483
3484
3485 UsedBytesMask <<= ElementSizeInBytes;
3486
3487
3488 if (Op->getAsZExtVal() != 0)
3489 UsedBytesMask |= ElementMask;
3490 }
3491
3492 assert(UsedBytesMask != 0 && UsedBytesMask != UINT32_MAX &&
3493 "Unexpected masked load with elements masked all on or all off");
3494
3495
3497 DAG.getLoad(ResVT, DL, Chain, BasePtr, N->getMemOperand()).getNode());
3498
3499
3500
3502 UsedBytesMask = UINT32_MAX;
3503
3504 return {NewLD, UsedBytesMask};
3505}
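// Sketch of the used-bytes mask construction above: the per-element i1 mask
// of a masked load over lanes of ElemBytes bytes is widened into a mask with
// one bit per byte of the access. Plain C++, illustrative only; this version
// visits lanes from last to first so that lane 0 ends up in the
// least-significant bits.
static unsigned usedBytesMask(const bool *LaneEnabled, unsigned NumLanes,
                              unsigned ElemBytes) {
  const unsigned ElemMask = (1u << ElemBytes) - 1u;
  unsigned Mask = 0;
  for (unsigned I = NumLanes; I-- > 0;) {
    Mask <<= ElemBytes; // make room for the next lane's bytes
    if (LaneEnabled[I])
      Mask |= ElemMask; // mark every byte of an enabled lane as used
  }
  return Mask;
}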
3506
3507
3508static std::optional<std::pair<SDValue, SDValue>>
3511 const EVT ResVT = LD->getValueType(0);
3512 const EVT MemVT = LD->getMemoryVT();
3513
3514
3515
3516 if (ResVT != MemVT)
3517 return std::nullopt;
3518
3519 const auto NumEltsAndEltVT =
3521 if (!NumEltsAndEltVT)
3522 return std::nullopt;
3523 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3524
3525 Align Alignment = LD->getAlign();
3528 if (Alignment < PrefAlign) {
3529
3530
3531
3532
3533
3534 return std::nullopt;
3535 }
3536
3537
3538 std::optional<uint32_t> UsedBytesMask = std::nullopt;
3539 if (LD->getOpcode() == ISD::MLOAD)
3540 std::tie(LD, UsedBytesMask) =
3542
3543
3544
3545
3546 const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT;
3547
3548 unsigned Opcode;
3549 switch (NumElts) {
3550 default:
3551 return std::nullopt;
3552 case 2:
3554 break;
3555 case 4:
3557 break;
3558 case 8:
3560 break;
3561 }
3563 ListVTs.push_back(MVT::Other);
3565
3567
3568
3570
3572 DAG.getConstant(UsedBytesMask.value_or(UINT32_MAX), DL, MVT::i32));
3573
3574
3575
3578
3580 LD->getMemOperand());
3581
3587
3588
3589 for (const unsigned I : llvm::seq(NumElts)) {
3592 }
3593 } else {
3594 for (const unsigned I : llvm::seq(NumElts)) {
3596 if (LoadEltVT != EltVT)
3599 }
3600 }
3601
3603
3604 const MVT BuildVecVT =
3608
3609 return {{LoadValue, LoadChain}};
3610}
3611
3616 Results.append({Res->first, Res->second});
3617}
3618
3625
3626
3627
3628
3629
3633 assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");
3635 LD->getBasePtr(), LD->getPointerInfo(),
3636 MVT::i8, LD->getAlign(),
3637 LD->getMemOperand()->getFlags());
3639
3640
3641
3642 return DAG.getMergeValues({result, LD->getChain()}, dl);
3643}
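// i1 loads (above) are emitted as one-byte i8 extending loads at the original
// address and then truncated back to i1, since PTX has no 1-bit memory
// accesses; the original chain is returned alongside the loaded value.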
3644
3647
3648 if (Op.getValueType() == MVT::i1)
3650
3651
3652
3653
3655 assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&
3656 "Unexpected fpext-load");
3658 LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),
3659 LD->getMemOperand());
3660 }
3661
3663}
3664
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675 EVT VT = Op.getValueType();
3679 MemSDNode *LD = std::get<0>(Result);
3680 uint32_t UsedBytesMask = std::get<1>(Result);
3681
3682 SDLoc DL(LD);
3683
3684
3686
3687 OtherOps.push_back(DAG.getConstant(UsedBytesMask, DL, MVT::i32));
3688
3689
3690
3691 OtherOps.push_back(
3695 LD->getMemoryVT(), LD->getMemOperand());
3696 return NewLD;
3697 }
3699}
3700
3704 SDValue Val = N->getOperand(1);
3707 const EVT MemVT = N->getMemoryVT();
3708
3709
3710
3711 if (ValVT != MemVT)
3713
3714 const auto NumEltsAndEltVT =
3716 if (!NumEltsAndEltVT)
3718 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3719
3721
3722 Align Alignment = N->getAlign();
3724 if (Alignment < PrefAlign) {
3725
3726
3727
3728
3729
3731 }
3732
3733 unsigned Opcode;
3734 switch (NumElts) {
3735 default:
3737 case 2:
3739 break;
3740 case 4:
3742 break;
3743 case 8:
3745 break;
3746 }
3747
3749
3750
3751 Ops.push_back(N->getOperand(0));
3752
3753
3758
3759
3761 for (const unsigned I : llvm::seq(NumElts)) {
3764 NumEltsPerSubVector);
3766 }
3767 } else {
3769 for (const unsigned I : llvm::seq(NumElts)) {
3772
3773
3774
3775
3776
3779 Ops.push_back(ExtVal);
3780 }
3781 }
3782
3783
3784 Ops.append(N->op_begin() + 2, N->op_end());
3785
3788 N->getMemoryVT(), N->getMemOperand());
3789
3790
3791 return NewSt;
3792}
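// lowerStoreVector (above) is the store-side counterpart: a sufficiently
// aligned vector store is flattened into a single NVPTX store node whose
// operands are the chain, the individual elements (extracted from the value,
// and widened to i16 when narrower), followed by the remaining operands such
// as the address.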
3793
3796 EVT VT = Store->getMemoryVT();
3797
3798 if (VT == MVT::i1)
3799 return LowerSTOREi1(Op, DAG);
3800
3801
3802
3804}
3805
3806
3807
3808
3809
3811 SDNode *Node = Op.getNode();
3812 SDLoc dl(Node);
3815 SDValue Tmp2 = ST->getBasePtr();
3817 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
3820 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
3821 ST->getAlign(), ST->getMemOperand()->getFlags());
3823}
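// As with loads, i1 values are stored by truncating to i8 and emitting a
// one-byte truncating store with the original pointer info and alignment.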
3824
3827
3828
3829
3830 assert(Op.getOperand(1).getValueType() == MVT::i128 &&
3831 "Custom lowering for 128-bit CopyToReg only");
3832
3833 SDNode *Node = Op.getNode();
3834 SDLoc DL(Node);
3835
3841
3844
3845 NewOps[0] = Op->getOperand(0);
3846 NewOps[1] = Op->getOperand(1);
3847 NewOps[2] = Lo;
3848 NewOps[3] = Hi;
3850 NewOps[4] = Op->getOperand(3);
3851
3853}
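// CopyToReg of an i128 value (above) is handled by splitting the value into
// its two i64 halves and rebuilding the CopyToReg node with both halves (and
// the optional glue operand) in the operand list.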
3854
3855unsigned NVPTXTargetLowering::getNumRegisters(
3857 std::optional<MVT> RegisterVT = std::nullopt) const {
3858 if (VT == MVT::i128 && RegisterVT == MVT::i128)
3859 return 1;
3861}
3862
3863bool NVPTXTargetLowering::splitValueIntoRegisterParts(
3865 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
3866 if (Val.getValueType() == MVT::i128 && NumParts == 1) {
3867 Parts[0] = Val;
3868 return true;
3869 }
3870 return false;
3871}
3872
3873
3874
3875
3876
3879 StringRef SavedStr = nvTM->getStrPool().save(
3882}
3883
3886 const StringRef SavedStr = nvTM->getStrPool().save("param" + Twine(I));
3888}
3889
3897
3899
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913 auto AllIns = ArrayRef(Ins);
3914 for (const auto &Arg : F.args()) {
3915 const auto ArgIns = AllIns.take_while(
3916 [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });
3917 AllIns = AllIns.drop_front(ArgIns.size());
3918
3919 Type *Ty = Arg.getType();
3920
3921 if (ArgIns.empty())
3923
3924 if (Arg.use_empty()) {
3925
3926 for (const auto &In : ArgIns) {
3927 assert(!In.Used && "Arg.use_empty() is true but Arg is used?");
3929 }
3930 continue;
3931 }
3932
3933 SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);
3934
3935
3936
3937
3938
3939 if (Arg.hasByValAttr()) {
3940
3941
3942
3943
3944
3945
3946
3947 assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");
3948 const auto &ByvalIn = ArgIns[0];
3950 "Ins type did not match function type");
3951 assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");
3952
3955 P = ArgSymbol;
3956 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3957 } else {
3958 P = DAG.getNode(NVPTXISD::MoveParam, dl, ByvalIn.VT, ArgSymbol);
3959 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3962 }
3964 } else {
3968 assert(VTs.size() == ArgIns.size() && "Size mismatch");
3969 assert(VTs.size() == Offsets.size() && "Size mismatch");
3970
3972 &F, Ty, Arg.getArgNo() + AttributeList::FirstArgIndex, DL);
3973
3974 unsigned I = 0;
3976 for (const unsigned NumElts : VI) {
3977
3978 const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];
3980
3983
3986 DAG.getLoad(VecVT, dl, Root, VecAddr,
3990 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3991 for (const unsigned J : llvm::seq(NumElts)) {
3993
3995 DAG, dl);
3997 }
3998 I += NumElts;
3999 }
4000 }
4001 }
4002
4003 if (!OutChains.empty())
4005
4006 return Chain;
4007}
4008
4011 bool isVarArg,
4016 Type *RetTy = F.getReturnType();
4017
4019 assert(OutVals.empty() && Outs.empty() && "Return value expected for void");
4020 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
4021 }
4022
4025
4028
4029
4030
4031
4032 const bool ExtendIntegerRetVal =
4033 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
4034
4038 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
4039
4040 const auto GetRetVal = [&](unsigned I) -> SDValue {
4041 SDValue RetVal = OutVals[I];
4044 "OutVal type should always be legal");
4045
4047 const EVT StoreVT =
4048 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
4049 return correctParamType(RetVal, StoreVT, Outs[I].Flags, DAG, dl);
4050 };
4051
4052 unsigned I = 0;
4054 for (const unsigned NumElts : VI) {
4055 const MaybeAlign CurrentAlign = ExtendIntegerRetVal
4058
4060 NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });
4061
4064
4065 Chain = DAG.getStore(Chain, dl, Val, Ptr,
4067
4068 I += NumElts;
4069 }
4070
4071 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
4072}
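// LowerReturn stores each piece of the return value into the return-value
// parameter area: integer returns narrower than 32 bits are widened to i32
// (and i1 to i8 otherwise), neighbouring pieces are grouped into vector
// stores where possible, and the node chain is terminated with
// NVPTXISD::RET_GLUE.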
4073
4077 if (Constraint.size() > 1)
4078 return;
4080}
4081
4082
4083
4084
4085
4086
4092 default:
4093 return false;
4094 case Intrinsic::nvvm_match_all_sync_i32p:
4095 case Intrinsic::nvvm_match_all_sync_i64p:
4097
4098
4099
4100 Info.memVT = MVT::i1;
4101
4102
4104 return true;
4105 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
4106 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
4107 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
4108 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
4109 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
4110 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
4111 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
4112 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
4113 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
4114 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
4115 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
4116 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
4117 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
4118 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
4119 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
4120 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
4121 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
4122 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
4123 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
4124 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
4125 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
4126 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
4127 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
4128 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
4130 Info.memVT = MVT::v8f16;
4131 Info.ptrVal = I.getArgOperand(0);
4132 Info.offset = 0;
4134 Info.align = Align(16);
4135 return true;
4136 }
4137 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
4138 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
4139 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
4140 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
4141 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
4142 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
4143 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
4144 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
4145 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
4146 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
4147 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
4148 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
4149 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
4150 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
4151 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
4152 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
4153 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
4154 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
4155 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
4156 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
4157 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
4158 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
4159 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
4160 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
4162 Info.memVT = MVT::v2i32;
4163 Info.ptrVal = I.getArgOperand(0);
4164 Info.offset = 0;
4166 Info.align = Align(8);
4167 return true;
4168 }
4169
4170 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
4171 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
4172 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
4173 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
4174 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
4175 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
4176 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
4177 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
4178 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
4179 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
4180 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
4181 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
4182 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
4183 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
4184 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
4185 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
4186
4187 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
4188 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
4189 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
4190 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
4191 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
4192 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
4193 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
4194 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
4195 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
4196 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
4197 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
4198 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
4199 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
4200 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
4201 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
4202 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
4203 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
4204 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:
4205 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:
4206 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:
4207 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:
4208 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:
4209 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {
4211 Info.memVT = MVT::v4i32;
4212 Info.ptrVal = I.getArgOperand(0);
4213 Info.offset = 0;
4215 Info.align = Align(16);
4216 return true;
4217 }
4218
4219 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
4220 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
4221 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
4222 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
4223 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
4224 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
4225 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
4226 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
4227
4228 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
4229 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
4230 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
4231 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
4232 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
4233 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
4234 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
4235 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
4236 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
4237 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
4238 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
4239 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
4240 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
4241 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
4242 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
4243 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
4244 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
4245 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
4246 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
4247 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
4248 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
4249 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:
4250 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:
4251 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {
4253 Info.memVT = MVT::i32;
4254 Info.ptrVal = I.getArgOperand(0);
4255 Info.offset = 0;
4257 Info.align = Align(4);
4258 return true;
4259 }
4260
4261 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
4262 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
4263 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
4264 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
4265 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
4266 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
4267 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
4268 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
4269 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
4270 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
4271 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
4272 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
4274 Info.memVT = MVT::v4f16;
4275 Info.ptrVal = I.getArgOperand(0);
4276 Info.offset = 0;
4278 Info.align = Align(16);
4279 return true;
4280 }
4281
4282 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
4283 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
4284 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
4285 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
4286 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
4287 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
4288 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
4289 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
4290 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
4291 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
4292 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
4293 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
4294 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
4295 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
4296 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
4297 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
4299 Info.memVT = MVT::v8f32;
4300 Info.ptrVal = I.getArgOperand(0);
4301 Info.offset = 0;
4303 Info.align = Align(16);
4304 return true;
4305 }
4306
4307 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
4308 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
4309 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
4310 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
4311
4312 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
4313 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
4314 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
4315 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
4316
4317 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
4318 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
4319 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
4320 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
4321 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
4322 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
4323 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
4324 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
4325 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
4326 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
4327 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
4328 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
4330 Info.memVT = MVT::v8i32;
4331 Info.ptrVal = I.getArgOperand(0);
4332 Info.offset = 0;
4334 Info.align = Align(16);
4335 return true;
4336 }
4337
4338 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
4339 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
4340 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
4341 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
4342 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
4343 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
4344 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
4345 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
4346 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
4347 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:
4348 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:
4349 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:
4350 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:
4351 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:
4352 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {
4354 Info.memVT = MVT::v2i32;
4355 Info.ptrVal = I.getArgOperand(0);
4356 Info.offset = 0;
4358 Info.align = Align(8);
4359 return true;
4360 }
4361
4362 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
4363 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
4364 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
4365 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
4366
4367 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
4368 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
4369 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
4370 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
4372 Info.memVT = MVT::f64;
4373 Info.ptrVal = I.getArgOperand(0);
4374 Info.offset = 0;
4376 Info.align = Align(8);
4377 return true;
4378 }
4379
4380 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
4381 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
4382 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
4383 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
4385 Info.memVT = MVT::v2f64;
4386 Info.ptrVal = I.getArgOperand(0);
4387 Info.offset = 0;
4389 Info.align = Align(16);
4390 return true;
4391 }
4392
4393 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
4394 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
4395 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
4396 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
4397 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
4398 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
4399 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
4400 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
4401 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
4402 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
4403 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
4404 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
4406 Info.memVT = MVT::v4f16;
4407 Info.ptrVal = I.getArgOperand(0);
4408 Info.offset = 0;
4410 Info.align = Align(16);
4411 return true;
4412 }
4413
4414 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
4415 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
4416 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
4417 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
4418 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
4419 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
4420 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
4421 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
4422 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
4423 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
4424 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
4425 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
4426 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
4427 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
4428 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
4429 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
4431 Info.memVT = MVT::v8f32;
4432 Info.ptrVal = I.getArgOperand(0);
4433 Info.offset = 0;
4435 Info.align = Align(16);
4436 return true;
4437 }
4438
4439 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
4440 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
4441 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
4442 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
4443 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
4444 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
4445 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
4446 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
4447 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
4448 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
4449 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
4450 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
4452 Info.memVT = MVT::v8i32;
4453 Info.ptrVal = I.getArgOperand(0);
4454 Info.offset = 0;
4456 Info.align = Align(16);
4457 return true;
4458 }
4459
4460 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
4461 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
4462 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
4463 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
4464 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
4465 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
4466 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
4467 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:
4468 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:
4469 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:
4470 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {
4472 Info.memVT = MVT::v2i32;
4473 Info.ptrVal = I.getArgOperand(0);
4474 Info.offset = 0;
4476 Info.align = Align(8);
4477 return true;
4478 }
4479
4480 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
4481 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
4482 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
4483 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
4485 Info.memVT = MVT::v2f64;
4486 Info.ptrVal = I.getArgOperand(0);
4487 Info.offset = 0;
4489 Info.align = Align(16);
4490 return true;
4491 }
4492
4493 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:
4494 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:
4495 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {
4497 Info.memVT = MVT::i32;
4498 Info.ptrVal = I.getArgOperand(0);
4499 Info.offset = 0;
4501 Info.align = Align(4);
4502 return true;
4503 }
4504
4505 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:
4506 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:
4507 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {
4509 Info.memVT = MVT::v4i32;
4510 Info.ptrVal = I.getArgOperand(0);
4511 Info.offset = 0;
4513 Info.align = Align(16);
4514 return true;
4515 }
4516
4517 case Intrinsic::nvvm_atomic_add_gen_f_cta:
4518 case Intrinsic::nvvm_atomic_add_gen_f_sys:
4519 case Intrinsic::nvvm_atomic_add_gen_i_cta:
4520 case Intrinsic::nvvm_atomic_add_gen_i_sys:
4521 case Intrinsic::nvvm_atomic_and_gen_i_cta:
4522 case Intrinsic::nvvm_atomic_and_gen_i_sys:
4523 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
4524 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
4525 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
4526 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
4527 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
4528 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
4529 case Intrinsic::nvvm_atomic_max_gen_i_cta:
4530 case Intrinsic::nvvm_atomic_max_gen_i_sys:
4531 case Intrinsic::nvvm_atomic_min_gen_i_cta:
4532 case Intrinsic::nvvm_atomic_min_gen_i_sys:
4533 case Intrinsic::nvvm_atomic_or_gen_i_cta:
4534 case Intrinsic::nvvm_atomic_or_gen_i_sys:
4535 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
4536 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
4537 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
4538 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
4539 auto &DL = I.getDataLayout();
4542 Info.ptrVal = I.getArgOperand(0);
4543 Info.offset = 0;
4545 Info.align.reset();
4546 return true;
4547 }
4548
4549 case Intrinsic::nvvm_prefetch_tensormap: {
4550 auto &DL = I.getDataLayout();
4553 Info.ptrVal = I.getArgOperand(0);
4554 Info.offset = 0;
4555 Info.flags =
4557 Info.align.reset();
4558 return true;
4559 }
4560
4561 case Intrinsic::nvvm_ldu_global_i:
4562 case Intrinsic::nvvm_ldu_global_f:
4563 case Intrinsic::nvvm_ldu_global_p: {
4565 Info.memVT = getValueType(I.getDataLayout(), I.getType());
4566 Info.ptrVal = I.getArgOperand(0);
4567 Info.offset = 0;
4569 Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4570
4571 return true;
4572 }
4573 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4574 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4575 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4576 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4577 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4578 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4579 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4580 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4581 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4582 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4583 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4584 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4585 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4586 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4587 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4588 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4589 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4590 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4591 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4592 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4593 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4594 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4595 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4596 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4597 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4598 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4599 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4600 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4601 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4602 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4603 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4604 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4605 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4606 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4607 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4608 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4609 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4610 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4611 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4612 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4613 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4614 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4615 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4616 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4617 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4618 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4619 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4620 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4621 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4622 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4623 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4624 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4625 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4626 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4627 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4628 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4629 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4630 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4632 Info.memVT = MVT::v4f32;
4633 Info.ptrVal = nullptr;
4634 Info.offset = 0;
4636 Info.align = Align(16);
4637 return true;
4638
4639 case Intrinsic::nvvm_tex_1d_v4s32_s32:
4640 case Intrinsic::nvvm_tex_1d_v4s32_f32:
4641 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
4642 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
4643 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
4644 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
4645 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
4646 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
4647 case Intrinsic::nvvm_tex_2d_v4s32_s32:
4648 case Intrinsic::nvvm_tex_2d_v4s32_f32:
4649 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
4650 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
4651 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
4652 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
4653 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
4654 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
4655 case Intrinsic::nvvm_tex_3d_v4s32_s32:
4656 case Intrinsic::nvvm_tex_3d_v4s32_f32:
4657 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
4658 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
4659 case Intrinsic::nvvm_tex_cube_v4s32_f32:
4660 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
4661 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
4662 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
4663 case Intrinsic::nvvm_tex_cube_v4u32_f32:
4664 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
4665 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
4666 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
4667 case Intrinsic::nvvm_tex_1d_v4u32_s32:
4668 case Intrinsic::nvvm_tex_1d_v4u32_f32:
4669 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
4670 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
4671 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
4672 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
4673 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
4674 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
4675 case Intrinsic::nvvm_tex_2d_v4u32_s32:
4676 case Intrinsic::nvvm_tex_2d_v4u32_f32:
4677 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
4678 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
4679 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
4680 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
4681 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
4682 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
4683 case Intrinsic::nvvm_tex_3d_v4u32_s32:
4684 case Intrinsic::nvvm_tex_3d_v4u32_f32:
4685 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
4686 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
4687 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
4688 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
4689 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
4690 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
4691 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
4692 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
4693 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
4694 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
4695 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
4696 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
4697 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
4698 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
4699 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
4700 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
4701 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
4702 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
4703 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
4704 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
4705 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
4706 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
4707 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
4708 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
4709 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
4710 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
4711 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
4712 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
4713 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
4714 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
4715 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
4716 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
4717 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
4718 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
4719 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
4720 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
4721 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
4722 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
4723 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
4724 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
4725 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
4726 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
4727 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
4728 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
4729 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
4730 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
4731 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
4732 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
4733 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
4734 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
4735 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
4736 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
4737 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
4738 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
4739 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
4740 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
4741 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
4742 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
4743 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
4744 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
4745 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
4746 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
4747 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
4748 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
4749 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
4750 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
4751 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
4752 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
4753 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
4754 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
4756 Info.memVT = MVT::v4i32;
4757 Info.ptrVal = nullptr;
4758 Info.offset = 0;
4760 Info.align = Align(16);
4761 return true;
4762
4763 case Intrinsic::nvvm_suld_1d_i8_clamp:
4764 case Intrinsic::nvvm_suld_1d_v2i8_clamp:
4765 case Intrinsic::nvvm_suld_1d_v4i8_clamp:
4766 case Intrinsic::nvvm_suld_1d_array_i8_clamp:
4767 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
4768 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
4769 case Intrinsic::nvvm_suld_2d_i8_clamp:
4770 case Intrinsic::nvvm_suld_2d_v2i8_clamp:
4771 case Intrinsic::nvvm_suld_2d_v4i8_clamp:
4772 case Intrinsic::nvvm_suld_2d_array_i8_clamp:
4773 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
4774 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
4775 case Intrinsic::nvvm_suld_3d_i8_clamp:
4776 case Intrinsic::nvvm_suld_3d_v2i8_clamp:
4777 case Intrinsic::nvvm_suld_3d_v4i8_clamp:
4778 case Intrinsic::nvvm_suld_1d_i8_trap:
4779 case Intrinsic::nvvm_suld_1d_v2i8_trap:
4780 case Intrinsic::nvvm_suld_1d_v4i8_trap:
4781 case Intrinsic::nvvm_suld_1d_array_i8_trap:
4782 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
4783 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
4784 case Intrinsic::nvvm_suld_2d_i8_trap:
4785 case Intrinsic::nvvm_suld_2d_v2i8_trap:
4786 case Intrinsic::nvvm_suld_2d_v4i8_trap:
4787 case Intrinsic::nvvm_suld_2d_array_i8_trap:
4788 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
4789 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
4790 case Intrinsic::nvvm_suld_3d_i8_trap:
4791 case Intrinsic::nvvm_suld_3d_v2i8_trap:
4792 case Intrinsic::nvvm_suld_3d_v4i8_trap:
4793 case Intrinsic::nvvm_suld_1d_i8_zero:
4794 case Intrinsic::nvvm_suld_1d_v2i8_zero:
4795 case Intrinsic::nvvm_suld_1d_v4i8_zero:
4796 case Intrinsic::nvvm_suld_1d_array_i8_zero:
4797 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
4798 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
4799 case Intrinsic::nvvm_suld_2d_i8_zero:
4800 case Intrinsic::nvvm_suld_2d_v2i8_zero:
4801 case Intrinsic::nvvm_suld_2d_v4i8_zero:
4802 case Intrinsic::nvvm_suld_2d_array_i8_zero:
4803 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
4804 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
4805 case Intrinsic::nvvm_suld_3d_i8_zero:
4806 case Intrinsic::nvvm_suld_3d_v2i8_zero:
4807 case Intrinsic::nvvm_suld_3d_v4i8_zero:
4809 Info.memVT = MVT::i8;
4810 Info.ptrVal = nullptr;
4811 Info.offset = 0;
4813 Info.align = Align(16);
4814 return true;
4815
4816 case Intrinsic::nvvm_suld_1d_i16_clamp:
4817 case Intrinsic::nvvm_suld_1d_v2i16_clamp:
4818 case Intrinsic::nvvm_suld_1d_v4i16_clamp:
4819 case Intrinsic::nvvm_suld_1d_array_i16_clamp:
4820 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
4821 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
4822 case Intrinsic::nvvm_suld_2d_i16_clamp:
4823 case Intrinsic::nvvm_suld_2d_v2i16_clamp:
4824 case Intrinsic::nvvm_suld_2d_v4i16_clamp:
4825 case Intrinsic::nvvm_suld_2d_array_i16_clamp:
4826 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
4827 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
4828 case Intrinsic::nvvm_suld_3d_i16_clamp:
4829 case Intrinsic::nvvm_suld_3d_v2i16_clamp:
4830 case Intrinsic::nvvm_suld_3d_v4i16_clamp:
4831 case Intrinsic::nvvm_suld_1d_i16_trap:
4832 case Intrinsic::nvvm_suld_1d_v2i16_trap:
4833 case Intrinsic::nvvm_suld_1d_v4i16_trap:
4834 case Intrinsic::nvvm_suld_1d_array_i16_trap:
4835 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
4836 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
4837 case Intrinsic::nvvm_suld_2d_i16_trap:
4838 case Intrinsic::nvvm_suld_2d_v2i16_trap:
4839 case Intrinsic::nvvm_suld_2d_v4i16_trap:
4840 case Intrinsic::nvvm_suld_2d_array_i16_trap:
4841 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
4842 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
4843 case Intrinsic::nvvm_suld_3d_i16_trap:
4844 case Intrinsic::nvvm_suld_3d_v2i16_trap:
4845 case Intrinsic::nvvm_suld_3d_v4i16_trap:
4846 case Intrinsic::nvvm_suld_1d_i16_zero:
4847 case Intrinsic::nvvm_suld_1d_v2i16_zero:
4848 case Intrinsic::nvvm_suld_1d_v4i16_zero:
4849 case Intrinsic::nvvm_suld_1d_array_i16_zero:
4850 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
4851 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
4852 case Intrinsic::nvvm_suld_2d_i16_zero:
4853 case Intrinsic::nvvm_suld_2d_v2i16_zero:
4854 case Intrinsic::nvvm_suld_2d_v4i16_zero:
4855 case Intrinsic::nvvm_suld_2d_array_i16_zero:
4856 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
4857 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
4858 case Intrinsic::nvvm_suld_3d_i16_zero:
4859 case Intrinsic::nvvm_suld_3d_v2i16_zero:
4860 case Intrinsic::nvvm_suld_3d_v4i16_zero:
4862 Info.memVT = MVT::i16;
4863 Info.ptrVal = nullptr;
4864 Info.offset = 0;
4866 Info.align = Align(16);
4867 return true;
4868
4869 case Intrinsic::nvvm_suld_1d_i32_clamp:
4870 case Intrinsic::nvvm_suld_1d_v2i32_clamp:
4871 case Intrinsic::nvvm_suld_1d_v4i32_clamp:
4872 case Intrinsic::nvvm_suld_1d_array_i32_clamp:
4873 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
4874 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
4875 case Intrinsic::nvvm_suld_2d_i32_clamp:
4876 case Intrinsic::nvvm_suld_2d_v2i32_clamp:
4877 case Intrinsic::nvvm_suld_2d_v4i32_clamp:
4878 case Intrinsic::nvvm_suld_2d_array_i32_clamp:
4879 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
4880 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
4881 case Intrinsic::nvvm_suld_3d_i32_clamp:
4882 case Intrinsic::nvvm_suld_3d_v2i32_clamp:
4883 case Intrinsic::nvvm_suld_3d_v4i32_clamp:
4884 case Intrinsic::nvvm_suld_1d_i32_trap:
4885 case Intrinsic::nvvm_suld_1d_v2i32_trap:
4886 case Intrinsic::nvvm_suld_1d_v4i32_trap:
4887 case Intrinsic::nvvm_suld_1d_array_i32_trap:
4888 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
4889 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
4890 case Intrinsic::nvvm_suld_2d_i32_trap:
4891 case Intrinsic::nvvm_suld_2d_v2i32_trap:
4892 case Intrinsic::nvvm_suld_2d_v4i32_trap:
4893 case Intrinsic::nvvm_suld_2d_array_i32_trap:
4894 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
4895 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
4896 case Intrinsic::nvvm_suld_3d_i32_trap:
4897 case Intrinsic::nvvm_suld_3d_v2i32_trap:
4898 case Intrinsic::nvvm_suld_3d_v4i32_trap:
4899 case Intrinsic::nvvm_suld_1d_i32_zero:
4900 case Intrinsic::nvvm_suld_1d_v2i32_zero:
4901 case Intrinsic::nvvm_suld_1d_v4i32_zero:
4902 case Intrinsic::nvvm_suld_1d_array_i32_zero:
4903 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
4904 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
4905 case Intrinsic::nvvm_suld_2d_i32_zero:
4906 case Intrinsic::nvvm_suld_2d_v2i32_zero:
4907 case Intrinsic::nvvm_suld_2d_v4i32_zero:
4908 case Intrinsic::nvvm_suld_2d_array_i32_zero:
4909 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
4910 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
4911 case Intrinsic::nvvm_suld_3d_i32_zero:
4912 case Intrinsic::nvvm_suld_3d_v2i32_zero:
4913 case Intrinsic::nvvm_suld_3d_v4i32_zero:
4915 Info.memVT = MVT::i32;
4916 Info.ptrVal = nullptr;
4917 Info.offset = 0;
4919 Info.align = Align(16);
4920 return true;
4921
4922 case Intrinsic::nvvm_suld_1d_i64_clamp:
4923 case Intrinsic::nvvm_suld_1d_v2i64_clamp:
4924 case Intrinsic::nvvm_suld_1d_array_i64_clamp:
4925 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
4926 case Intrinsic::nvvm_suld_2d_i64_clamp:
4927 case Intrinsic::nvvm_suld_2d_v2i64_clamp:
4928 case Intrinsic::nvvm_suld_2d_array_i64_clamp:
4929 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
4930 case Intrinsic::nvvm_suld_3d_i64_clamp:
4931 case Intrinsic::nvvm_suld_3d_v2i64_clamp:
4932 case Intrinsic::nvvm_suld_1d_i64_trap:
4933 case Intrinsic::nvvm_suld_1d_v2i64_trap:
4934 case Intrinsic::nvvm_suld_1d_array_i64_trap:
4935 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
4936 case Intrinsic::nvvm_suld_2d_i64_trap:
4937 case Intrinsic::nvvm_suld_2d_v2i64_trap:
4938 case Intrinsic::nvvm_suld_2d_array_i64_trap:
4939 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
4940 case Intrinsic::nvvm_suld_3d_i64_trap:
4941 case Intrinsic::nvvm_suld_3d_v2i64_trap:
4942 case Intrinsic::nvvm_suld_1d_i64_zero:
4943 case Intrinsic::nvvm_suld_1d_v2i64_zero:
4944 case Intrinsic::nvvm_suld_1d_array_i64_zero:
4945 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
4946 case Intrinsic::nvvm_suld_2d_i64_zero:
4947 case Intrinsic::nvvm_suld_2d_v2i64_zero:
4948 case Intrinsic::nvvm_suld_2d_array_i64_zero:
4949 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
4950 case Intrinsic::nvvm_suld_3d_i64_zero:
4951 case Intrinsic::nvvm_suld_3d_v2i64_zero:
4953 Info.memVT = MVT::i64;
4954 Info.ptrVal = nullptr;
4955 Info.offset = 0;
4957 Info.align = Align(16);
4958 return true;
4959
4960 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:
4961 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:
4962 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {
4964 Info.memVT = MVT::v1i32;
4965 Info.ptrVal = I.getArgOperand(0);
4966 Info.offset = 0;
4968 Info.align.reset();
4969 return true;
4970 }
4971
4972 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
4973 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
4974 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
4975 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: {
4977 Info.memVT = MVT::v2i32;
4978 Info.ptrVal = I.getArgOperand(0);
4979 Info.offset = 0;
4981 Info.align.reset();
4982 return true;
4983 }
4984
4985 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
4986 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
4987 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
4988 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
4989 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4: {
4991 Info.memVT = MVT::v4i32;
4992 Info.ptrVal = I.getArgOperand(0);
4993 Info.offset = 0;
4995 Info.align.reset();
4996 return true;
4997 }
4998
4999 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
5000 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
5001 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
5002 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
5003 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8: {
5005 Info.memVT = MVT::v8i32;
5006 Info.ptrVal = I.getArgOperand(0);
5007 Info.offset = 0;
5009 Info.align.reset();
5010 return true;
5011 }
5012
5013 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
5014 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
5015 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
5016 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
5017 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16: {
5019 Info.memVT = MVT::v16i32;
5020 Info.ptrVal = I.getArgOperand(0);
5021 Info.offset = 0;
5023 Info.align.reset();
5024 return true;
5025 }
5026
5027 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
5028 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
5029 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
5030 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
5031 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32: {
5033 Info.memVT = MVT::v32i32;
5034 Info.ptrVal = I.getArgOperand(0);
5035 Info.offset = 0;
5037 Info.align.reset();
5038 return true;
5039 }
5040
5041 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
5042 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
5043 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
5044 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
5045 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64: {
5047 Info.memVT = MVT::v64i32;
5048 Info.ptrVal = I.getArgOperand(0);
5049 Info.offset = 0;
5051 Info.align.reset();
5052 return true;
5053 }
5054
5055 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
5056 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
5057 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
5058 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
5059 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128: {
5061 Info.memVT = MVT::v128i32;
5062 Info.ptrVal = I.getArgOperand(0);
5063 Info.offset = 0;
5065 Info.align.reset();
5066 return true;
5067 }
5068
5069 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
5070 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
5071 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {
5073 Info.memVT = MVT::i32;
5074 Info.ptrVal = I.getArgOperand(0);
5075 Info.offset = 0;
5077 Info.align.reset();
5078 return true;
5079 }
5080
5081 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
5082 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
5083 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
5084 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {
5086 Info.memVT = MVT::v2i32;
5087 Info.ptrVal = I.getArgOperand(0);
5088 Info.offset = 0;
5090 Info.align.reset();
5091 return true;
5092 }
5093
5094 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
5095 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
5096 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
5097 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
5098 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {
5100 Info.memVT = MVT::v4i32;
5101 Info.ptrVal = I.getArgOperand(0);
5102 Info.offset = 0;
5104 Info.align.reset();
5105 return true;
5106 }
5107
5108 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
5109 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
5110 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
5111 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
5112 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {
5114 Info.memVT = MVT::v8i32;
5115 Info.ptrVal = I.getArgOperand(0);
5116 Info.offset = 0;
5118 Info.align.reset();
5119 return true;
5120 }
5121
5122 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
5123 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
5124 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
5125 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
5126 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {
5128 Info.memVT = MVT::v16i32;
5129 Info.ptrVal = I.getArgOperand(0);
5130 Info.offset = 0;
5132 Info.align.reset();
5133 return true;
5134 }
5135
5136 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
5137 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
5138 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
5139 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
5140 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {
5142 Info.memVT = MVT::v32i32;
5143 Info.ptrVal = I.getArgOperand(0);
5144 Info.offset = 0;
5146 Info.align.reset();
5147 return true;
5148 }
5149
5150 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
5151 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
5152 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
5153 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
5154 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {
5156 Info.memVT = MVT::v64i32;
5157 Info.ptrVal = I.getArgOperand(0);
5158 Info.offset = 0;
5160 Info.align.reset();
5161 return true;
5162 }
5163
5164 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
5165 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
5166 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
5167 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
5168 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {
5170 Info.memVT = MVT::v128i32;
5171 Info.ptrVal = I.getArgOperand(0);
5172 Info.offset = 0;
5174 Info.align.reset();
5175 return true;
5176 }
5177 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
5178 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
5179 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
5180 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
5181 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
5182 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
5183 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
5184 case Intrinsic::
5185 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
5186 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
5187 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
5188 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
5189 case Intrinsic::
5190 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: {
5191
5193 Info.memVT = MVT::v4i32;
5194 Info.ptrVal = I.getArgOperand(0);
5195 Info.offset = 0;
5197 Info.align = Align(16);
5198 return true;
5199 }
5200
5201 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
5202 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
5203 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
5204 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
5205 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
5206 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
5207 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
5208 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
5209 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
5210 case Intrinsic::
5211 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
5212 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
5213 case Intrinsic::
5214 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: {
5215
5217 Info.memVT = MVT::v8i32;
5218 Info.ptrVal = I.getArgOperand(0);
5219 Info.offset = 0;
5221 Info.align = Align(16);
5222 return true;
5223 }
5224 }
5225 return false;
5226}
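// getTgtMemIntrinsic (above) reports the memory behaviour of NVVM intrinsics
// to the generic backend: for each wmma/ldmatrix/stmatrix, surface/texture,
// tcgen05 and generic-atomic intrinsic it fills in the memory type, pointer
// operand, offset, access flags and alignment so a MachineMemOperand can be
// attached to the resulting node; unrecognized intrinsics return false.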
5227
5228
5229
5230
5231
5232
5233
5234
5237
5238
5239 const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));
5240
5241
5242
5243
5244 if (!F || !F->hasLocalLinkage() ||
5245 F->hasAddressTaken(nullptr,
5246 false,
5247 true,
5248 true))
5249 return ABITypeAlign;
5250
5252 return std::max(Align(16), ABITypeAlign);
5253}
5254
5255
5259 Align ArgAlign = InitialAlign;
5260
5261 if (F)
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5274 ArgAlign = std::max(ArgAlign, Align(4));
5275
5276 return ArgAlign;
5277}
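// Parameter-alignment helpers: the ABI alignment is capped at 128 bytes (the
// PTX maximum) and may only be raised to at least 16 bytes, to enable
// vectorized parameter loads/stores, when the function has local linkage and
// its address is never taken, since external callers rely on the default ABI
// alignment. Byval device-function parameters may additionally be forced to
// a minimum of 4 bytes when the corresponding option is set.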
5278
5279
5280
5281
5283 int Idx) const {
5284 std::string ParamName;
5286
5288 if (Idx < 0)
5289 ParamStr << "_vararg";
5290 else
5291 ParamStr << "_param_" << Idx;
5292
5293 return ParamName;
5294}
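// getParamName (above) builds the PTX symbol used to refer to a parameter:
// the function's symbol name followed by "_param_<Idx>", or by "_vararg"
// when Idx is negative (the varargs buffer).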
5295
5296
5297
5298
5299
5300
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5315 return false;
5316
5319
5320 switch (AM.Scale) {
5321 case 0:
5322 break;
5323 case 1:
5324 if (AM.HasBaseReg)
5325 return false;
5326
5327 break;
5328 default:
5329
5330 return false;
5331 }
5332 return true;
5333}
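// isLegalAddressingMode (above) mirrors what PTX ld/st can encode: a base
// register, an immediate address, or base register plus immediate offset.
// Scaled indexing is rejected; Scale == 1 is only allowed when there is no
// separate base register, i.e. the "index" simply becomes the base.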
5334
5335
5336
5337
5338
5339
5340
5343 if (Constraint.size() == 1) {
5344 switch (Constraint[0]) {
5345 default:
5346 break;
5347 case 'b':
5348 case 'r':
5349 case 'h':
5350 case 'c':
5351 case 'l':
5352 case 'f':
5353 case 'd':
5354 case 'q':
5355 case '0':
5356 case 'N':
5358 }
5359 }
5361}
5362
5363std::pair<unsigned, const TargetRegisterClass *>
5366 MVT VT) const {
5367 if (Constraint.size() == 1) {
5368 switch (Constraint[0]) {
5369 case 'b':
5370 return std::make_pair(0U, &NVPTX::B1RegClass);
5371 case 'c':
5372 case 'h':
5373 return std::make_pair(0U, &NVPTX::B16RegClass);
5374 case 'r':
5375 case 'f':
5376 return std::make_pair(0U, &NVPTX::B32RegClass);
5377 case 'l':
5378 case 'N':
5379 case 'd':
5380 return std::make_pair(0U, &NVPTX::B64RegClass);
5381 case 'q': {
5382 if (STI.getSmVersion() < 70)
5384 "supported for sm_70 and higher!");
5385 return std::make_pair(0U, &NVPTX::B128RegClass);
5386 }
5387 }
5388 }
5390}
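// Inline-asm constraint letters map onto the NVPTX register banks: 'b' to
// the 1-bit predicate class B1, 'c'/'h' to B16, 'r'/'f' to B32, 'l'/'N'/'d'
// to B64, and 'q' to the 128-bit class B128, which is only accepted on sm_70
// and newer.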
5391
5392
5393
5394
5395
5398
5401
5402
5404 return false;
5405
5406
5408 return true;
5409
5410 return false;
5411}
5412
5415 return Const && Const->getZExtValue() == 0;
5416}
5417
5418
5419
5420
5421
5426
5427
5428
5429
5430
5433
5434
5435
5436
5438 unsigned ZeroOpNum;
5440 ZeroOpNum = 1;
5442 ZeroOpNum = 2;
5443 else
5445
5447 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
5449
5455 ((ZeroOpNum == 1) ? N1 : MAD),
5456 ((ZeroOpNum == 1) ? MAD : N1));
5457 }
5458
5460}
5461
5471 (N->getFlags().hasAllowContract() &&
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483 int numUses = 0;
5484 int nonAddCount = 0;
5486 numUses++;
5488 ++nonAddCount;
5489 if (numUses >= 5)
5491 }
5492 if (nonAddCount) {
5493 int orderNo = N->getIROrder();
5495
5496
5497
5498
5499 if (orderNo - orderNo2 < 500)
5501
5502
5503
5504
5505 bool opIsLive = false;
5508
5510 opIsLive = true;
5511
5512 if (!opIsLive)
5514 int orderNo3 = User->getIROrder();
5515 if (orderNo3 > orderNo) {
5516 opIsLive = true;
5517 break;
5518 }
5519 }
5520
5521 if (!opIsLive)
5523 int orderNo3 = User->getIROrder();
5524 if (orderNo3 > orderNo) {
5525 opIsLive = true;
5526 break;
5527 }
5528 }
5529
5530 if (!opIsLive)
5532 }
5533
5536 }
5537
5539}
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
5550
5551
5552
5555
5558
5559 EVT ElementVT = N->getValueType(0);
5560
5563
5564
5565
5567
5568 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)
5569 return true;
5570 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5571 if (N->getOpcode() != ISD::LOAD)
5572 return true;
5573
5574
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588 return !U.getUser()->use_empty();
5589 }
5590
5591
5592 return false;
5593 }))
5595
5598
5599
5600 unsigned Opcode;
5602 unsigned OldNumOutputs;
5603 switch (LD->getOpcode()) {
5604 case ISD::LOAD:
5605 OldNumOutputs = 1;
5606
5607
5608
5610
5611
5612 Operands.push_back(DCI.DAG.getConstant(UINT32_MAX, DL, MVT::i32));
5613 Operands.push_back(DCI.DAG.getIntPtrConstant(
5615 break;
5617 OldNumOutputs = 2;
5619 break;
5621
5622
5623 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5625 OldNumOutputs = 4;
5627 break;
5629
5631 }
5632
5633
5634 const unsigned NewNumOutputs = OldNumOutputs * 2;
5635 SmallVector<EVT> NewVTs(NewNumOutputs, ElementVT.getVectorElementType());
5636
5637 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
5638
5639
5640 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
5641 Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
5642 LD->getMemOperand());
5643
5644
5645
5646
5648 for (unsigned I : seq(OldNumOutputs))
5649 Results.push_back(DCI.DAG.getBuildVector(
5650 ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));
5651
5652 for (unsigned I : seq(NewLoad->getNumValues() - NewNumOutputs))
5654
5655 return DCI.DAG.getMergeValues(Results, DL);
5656}
5657
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5670 unsigned Front, unsigned Back) {
5671
5672
5675
5676
5677 EVT ElementVT = N->getOperand(Front).getValueType();
5678
5679
5682
5684
5685
5686 unsigned Opcode;
5687 switch (N->getOpcode()) {
5688 case ISD::STORE:
5689
5690
5691
5693 break;
5696 break;
5698
5699
5700 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5703 break;
5705
5707 default:
5709 }
5710
5711
5712
5714 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {
5717
5718
5719
5720 if (!BV.hasOneUse())
5722
5723
5724
5726
5727 if (Op.getOpcode() == ISD::BITCAST)
5729
5730
5731 if (Op.getValueType() == MVT::i16 && Op.getOpcode() == ISD::TRUNCATE &&
5732 Op->getOperand(0).getValueType() == MVT::i32)
5734
5735
5738 }
5739 Operands.append({BV.getOperand(0), BV.getOperand(1)});
5740 }
5741 Operands.append(N->op_end() - Back, N->op_end());
5742
5743
5745 ST->getMemoryVT(), ST->getMemOperand());
5746}
5747
5750
5752
5753
5754
5756 if (!ST->getValue().getValueType().isSimple())
5758 }
5759
5761}
5762
5766
5767
5768
5769 if (!N->getValueType(0).isSimple())
5771 }
5772
5774}
5775
5776
5777
5783
5784 SDValue N0 = N->getOperand(0);
5785 SDValue N1 = N->getOperand(1);
5786
5787
5789 if (VT.isVector() || VT != MVT::i32)
5791
5792
5794 return Result;
5795
5796
5798}
5799
5800
5801
5805 SDValue N0 = N->getOperand(0);
5806 SDValue N1 = N->getOperand(1);
5807
5809 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
5811
5812
5814 return Result;
5815
5816
5818}
5819
5820
5822 switch (MinMax2Opcode) {
5823 case ISD::FMAXNUM:
5824 case ISD::FMAXIMUMNUM:
5825 return NVPTXISD::FMAXNUM3;
5826 case ISD::FMINNUM:
5827 case ISD::FMINIMUMNUM:
5828 return NVPTXISD::FMINNUM3;
5829 case ISD::FMAXIMUM:
5830 return NVPTXISD::FMAXIMUM3;
5831 case ISD::FMINIMUM:
5832 return NVPTXISD::FMINIMUM3;
5833 default:
5835 }
5836}
5837
5838
5839
5842 unsigned PTXVersion, unsigned SmVersion) {
5843
5844
5845 EVT VT = N->getValueType(0);
5846 if (VT != MVT::f32 || PTXVersion < 88 || SmVersion < 100)
5848
5849 SDValue Op0 = N->getOperand(0);
5850 SDValue Op1 = N->getOperand(1);
5851 unsigned MinMaxOp2 = N->getOpcode();
5853
5855
5861
5866 }
5868}
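
Illustrative sketch, not part of the original file: this combine flattens a chain of two-input f32 min/max nodes into a single three-input PTX min/max (available on sm_100 and later with PTX 8.8 or newer). The host check below only demonstrates, for arbitrary finite inputs, that regrouping the chain preserves the result.

#include <cassert>
#include <cmath>

int main() {
  float a = 1.5f, b = -2.0f, c = 7.25f;
  float chained = std::fmax(std::fmax(a, b), c); // (fmaxnum (fmaxnum a, b), c)
  float flat = std::fmax(a, std::fmax(b, c));    // the 3-input form's result
  assert(chained == flat && flat == 7.25f);
  return 0;
}
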
5869
5874
5875
5878
5881 EVT VT = N->getValueType(0);
5882 bool IsSigned = N->getOpcode() == ISD::SREM;
5884
5885 const SDValue &Num = N->getOperand(0);
5886 const SDValue &Den = N->getOperand(1);
5887
5889 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
5891
5894 DAG.getNode(DivOpc, DL, VT, Num, Den),
5895 Den));
5896 }
5897 }
5899}
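
Illustrative sketch, not part of the original file: PerformREMCombine only fires when a matching division of the same operands is already present, so the remainder can be rebuilt from that quotient instead of keeping a separate rem. The host loop below, over arbitrary values, checks the identity used for the replacement.

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t num : {7, -7, 123456, -1})
    for (int32_t den : {3, -5, 10}) {
      int32_t quot = num / den;              // the DIV node that is already live
      assert(num % den == num - quot * den); // SUB(num, MUL(quot, den))
    }
  return 0;
}
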
5900
5901
5906
5908 if (!Op.hasOneUse())
5910 EVT ToVT = N->getValueType(0);
5911 EVT FromVT = Op.getValueType();
5912 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
5913 (ToVT == MVT::i64 && FromVT == MVT::i32)))
5918
5920 unsigned ExtOpcode = N->getOpcode();
5921 unsigned Opcode = 0;
5922 if (ExtOpcode == ISD::SIGN_EXTEND && Op->getFlags().hasNoSignedWrap())
5923 Opcode = NVPTXISD::MUL_WIDE_SIGNED;
5924 else if (ExtOpcode == ISD::ZERO_EXTEND && Op->getFlags().hasNoUnsignedWrap())
5925 Opcode = NVPTXISD::MUL_WIDE_UNSIGNED;
5926 else
5930 const auto ShiftAmt = Op.getConstantOperandVal(1);
5933 }
5934 return DCI.DAG.getNode(Opcode, DL, ToVT, Op.getOperand(0), RHS);
5935}
5936
5942
5943
5944
5945
5947 unsigned OptSize,
5950
5953 EVT OrigVT = Op.getOperand(0).getValueType();
5956 return true;
5957 }
5959 EVT OrigVT = Op.getOperand(0).getValueType();
5962 return true;
5963 }
5964 }
5965
5966 return false;
5967}
5968
5969
5970
5971
5972
5974 unsigned OptSize,
5975 bool &IsSigned) {
5977
5978
5980 return false;
5981
5982
5984 return false;
5985
5986 IsSigned = (LHSSign == Signed);
5987
5988
5990 const APInt &Val = CI->getAPIntValue();
5992 return Val.isIntN(OptSize);
5993 } else {
5995 }
5996 } else {
5999 return false;
6000
6001 return LHSSign == RHSSign;
6002 }
6003}
6004
6005
6006
6007
6008
6011 EVT MulType = N->getValueType(0);
6012 if (MulType != MVT::i32 && MulType != MVT::i64) {
6014 }
6015
6017 unsigned OptSize = MulType.getSizeInBits() >> 1;
6020
6021
6022 if (N->getOpcode() == ISD::MUL) {
6025 }
6026 }
6027
6028
6029 if (N->getOpcode() == ISD::SHL) {
6031 if (!ShlRHS) {
6033 }
6034
6040 } else {
6042 }
6043 }
6044
6046
6049 }
6050
6051 EVT DemotedVT;
6052 if (MulType == MVT::i32) {
6053 DemotedVT = MVT::i16;
6054 } else {
6055 DemotedVT = MVT::i32;
6056 }
6057
6058
6059
6064
6065 unsigned Opc;
6067 Opc = NVPTXISD::MUL_WIDE_SIGNED;
6068 } else {
6069 Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
6070 }
6071
6072 return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
6073}
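
Illustrative sketch, not part of the original file: TryMULWIDECombine relies on the fact that when both operands of a full-width multiply fit in half the width with consistent signedness, the result equals a widening half-width multiply, which maps to PTX mul.wide.s16/u16 (or .s32/.u32 for the 64-bit case). The values below are arbitrary.

#include <cassert>
#include <cstdint>

int main() {
  int32_t a = -1234, b = 5678; // both representable in i16
  int32_t wide = int32_t(int16_t(a)) * int32_t(int16_t(b)); // models mul.wide.s16
  assert(wide == a * b);

  uint32_t ua = 40000u, ub = 65535u; // both representable in u16
  uint32_t uwide = uint32_t(uint16_t(ua)) * uint32_t(uint16_t(ub)); // mul.wide.u16
  assert(uwide == ua * ub);
  return 0;
}
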
6074
6077 return Const && Const->getZExtValue() == 1;
6078}
6079
6083
6085 return Add->getOperand(1);
6086
6088 return Add->getOperand(0);
6089
6091}
6092
6103
6109
6111
6112 unsigned ConstOpNo;
6114 ConstOpNo = 1;
6116 ConstOpNo = 2;
6117 else
6119
6120 SDValue Y = Select->getOperand((ConstOpNo == 1) ? 2 : 1);
6121
6122
6125
6127
6129 (ConstOpNo == 1) ? X : NewMul,
6130 (ConstOpNo == 1) ? NewMul : X);
6131}
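
Illustrative sketch, not part of the original file: combineMADConstOne rewrites a multiply by an add-of-one using x * (y + 1) == x * y + x, which fits a single PTX mad; combineMulSelectConstOne distributes the multiply over a select in the same spirit when the constant one sits inside a select. The host loop below, over small arbitrary values, checks the underlying identity.

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t x : {0, 7, -19, 1024})
    for (int32_t y : {0, 3, -5, 123})
      assert(x * (y + 1) == x * y + x);
  return 0;
}
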
6132
6136
6140
6141 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
6143
6145
6146
6148 return Res;
6150 return Res;
6151
6152
6154 return Res;
6156 return Res;
6157
6159}
6160
6161
6167
6169 return Ret;
6170
6171 SDValue N0 = N->getOperand(0);
6172 SDValue N1 = N->getOperand(1);
6174}
6175
6176
6181
6183 return Ret;
6184 }
6185
6187}
6188
6191 unsigned int SmVersion) {
6192 EVT CCType = N->getValueType(0);
6195
6196 EVT AType = A.getValueType();
6197 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
6199
6200 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
6202
6204
6205
6206
6207
6211 DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
6214}
6215
6222 EVT VectorVT = Vector.getValueType();
6223 if (Vector->getOpcode() == ISD::LOAD && VectorVT.isSimple() &&
6225 return SDValue();
6226
6227
6228
6232
6233
6236
6238
6239 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
6241
6243
6244 if (!Index || Index->getZExtValue() == 0)
6246
6251
6256 DCI.DAG.getConstant(Index->getZExtValue() * EltBits, DL, IVT)));
6257
6258
6259 if (EltVT != EltIVT)
6260 Result = DCI.DAG.getNode(ISD::BITCAST, DL, EltVT, Result);
6261
6262 if (EltVT != N->getValueType(0))
6264
6265 return Result;
6266}
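
Illustrative sketch, not part of the original file: PerformEXTRACTCombine scalarizes an extract from a small packed vector by treating the whole vector as one integer, shifting right by Index * EltBits and truncating. The host model below does the same for element 2 of a v4i8-style packing; the bytes are arbitrary.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t v4i8 = 0x44332211u; // elements {0x11, 0x22, 0x33, 0x44}
  unsigned index = 2, eltBits = 8;
  uint8_t elt = uint8_t(v4i8 >> (index * eltBits)); // SRL by Index * EltBits, then TRUNCATE
  assert(elt == 0x33);
  return 0;
}
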
6267
6270 SDValue VA = N->getOperand(1);
6272 if (VectorVT != MVT::v4i8)
6274
6275
6276
6277
6278
6281 SDValue VCond = N->getOperand(0);
6282 SDValue VB = N->getOperand(2);
6283 for (int I = 0; I < 4; ++I) {
6289 DL, MVT::i32);
6293 DL, MVT::i32);
6296 }
6298}
6299
6302 auto VT = N->getValueType(0);
6304
6308
6309 auto Op0 = N->getOperand(0);
6310 auto Op1 = N->getOperand(1);
6311
6312
6313
6316
6317 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
6318 {&Op1, &Op1Bytes}};
6319
6320
6321
6322
6323 for (auto &[Op, OpBytes] : OpData) {
6324
6325 if (Op->getOpcode() == ISD::BITCAST)
6327
6328 if (!(Op->getValueType() == MVT::i16 && Op->getOpcode() == ISD::TRUNCATE &&
6329 Op->getOperand(0).getValueType() == MVT::i32))
6331
6332
6333
6334 if (!Op->hasOneUse())
6336
6338
6339
6340
6343
6344
6345 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
6346 "PRMT selector values out of range");
6347 *OpBytes += 0x22;
6349 }
6350 }
6351 }
6352
6354 auto &DAG = DCI.DAG;
6355
6356 auto PRMT =
6358 (Op1Bytes << 8) | Op0Bytes, DL, DAG);
6360}
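
Illustrative sketch, not part of the original file: this combine packs two truncated i32 values into one 32-bit register with a single PRMT, using the selector assembled above, (Op1Bytes << 8) | Op0Bytes, which is 0x5410 in the plain low-half case. The host view below shows the byte layout that permute produces; the values are arbitrary.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t A = 0xAAAA1111u, B = 0xBBBB2222u;
  // v2i16 {trunc A, trunc B}: result bytes are A[0], A[1], B[0], B[1],
  // i.e. PRMT byte indices 0, 1, 4, 5 -> selector 0x5410.
  uint32_t packed = (A & 0xFFFFu) | (B << 16);
  assert(packed == 0x22221111u);
  return 0;
}
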
6361
6365
6367 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());
6368
6369
6370 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())
6371 return ASCN2->getOperand(0);
6372 }
6373
6375}
6376
6377
6378
6379
6380
6382 assert(Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6383
6385 return Selector;
6386
6388
6389 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2,
6390 unsigned S3) {
6391 return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));
6392 };
6393
6394 switch (Mode) {
6396 return GetSelector(V, V + 1, V + 2, V + 3);
6398 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);
6400 return GetSelector(V, V, V, V);
6402 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);
6404 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);
6406 unsigned V1 = (V & 1) << 1;
6407 return GetSelector(V1, V1 + 1, V1, V1 + 1);
6408 }
6409 default:
6411 }
6412}
6413
6415 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&
6416 Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6417
6418 APInt BitField = B.concat(A);
6420 APInt Result(32, 0);
6425 APInt Byte = BitField.extractBits(8, Idx * 8);
6426 if (Sign)
6427 Byte = Byte.ashr(8);
6428 Result.insertBits(Byte, I * 8);
6429 }
6430 return Result;
6431}
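
Illustrative sketch, not part of the original file: a host-side model of the PRMT byte-permute semantics that getPRMTSelector and computePRMT implement. The two i32 inputs form an 8-byte pool (A supplies bytes 0-3, B bytes 4-7); each 4-bit selector nibble picks the source byte for the corresponding result byte, and bit 3 of a nibble replicates that byte's sign bit instead. The helper name and test values are invented for the example.

#include <cassert>
#include <cstdint>

static uint32_t prmt(uint32_t A, uint32_t B, uint32_t Selector) {
  uint64_t Pool = (uint64_t(B) << 32) | A; // bytes 7..4 = B, bytes 3..0 = A
  uint32_t Result = 0;
  for (int I = 0; I < 4; ++I) {
    uint32_t Nibble = (Selector >> (I * 4)) & 0xF;
    uint8_t Byte = uint8_t(Pool >> ((Nibble & 0x7) * 8));
    if (Nibble & 0x8)                      // sign-replication mode for this byte
      Byte = (Byte & 0x80) ? 0xFF : 0x00;
    Result |= uint32_t(Byte) << (I * 8);
  }
  return Result;
}

int main() {
  assert(prmt(0x44332211u, 0x88776655u, 0x3210u) == 0x44332211u); // identity
  assert(prmt(0x44332211u, 0x88776655u, 0x5410u) == 0x66552211u); // mix A and B
  return 0;
}
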
6432
6437
6438
6443 N->getConstantOperandAPInt(1),
6444 N->getConstantOperandAPInt(2),
6445 N->getConstantOperandVal(3)),
6446 SDLoc(N), N->getValueType(0));
6448}
6449
6450
6451
6452
6453
6454
6455
6456
6457
6458
6461 switch (R.getOpcode()) {
6466 case ISD::BITCAST: {
6468 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), V);
6470 }
6477 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), A, B);
6479 }
6481 return R;
6482 case ISD::LOAD:
6485 return DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(R), R.getValueType(),
6486 {Chain, R});
6487 }
6491
6493 for (auto &Op : R->ops()) {
6495 if (!V)
6497 Ops.push_back(V);
6498 }
6500 }
6504
6507 R.getValueType(), V, R.getOperand(1));
6509 }
6510 default:
6512 }
6513}
6514
6517
6518 SDValue Chain = N->getOperand(0);
6520
6521
6522
6523 if (Reg.getOpcode() != ISD::LOAD) {
6525 return V;
6526 }
6527
6529}
6530
6531SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
6532 DAGCombinerInfo &DCI) const {
6534 switch (N->getOpcode()) {
6535 default:
6536 break;
6539 case ISD::ADDRSPACECAST:
6550 case ISD::FMAXNUM:
6551 case ISD::FMINNUM:
6552 case ISD::FMAXIMUM:
6553 case ISD::FMINIMUM:
6554 case ISD::FMAXIMUMNUM:
6555 case ISD::FMINIMUMNUM:
6557 STI.getSmVersion());
6558 case ISD::LOAD:
6564 case NVPTXISD::PRMT:
6566 case NVPTXISD::ProxyReg:
6575 case ISD::STORE:
6581 }
6583}
6584
6587
6588
6590 EVT ToVT = Op->getValueType(0);
6591 if (ToVT != MVT::v2i8) {
6592 return;
6593 }
6594
6595
6605}
6606
6609 SDValue Chain = N->getOperand(0);
6610 SDValue Intrin = N->getOperand(1);
6612
6613
6615 switch (IntrinNo) {
6616 default:
6617 return;
6618 case Intrinsic::nvvm_ldu_global_i:
6619 case Intrinsic::nvvm_ldu_global_f:
6620 case Intrinsic::nvvm_ldu_global_p: {
6621 EVT ResVT = N->getValueType(0);
6622
6624
6625
6628
6629
6630
6631
6632
6633 bool NeedTrunc = false;
6635 EltVT = MVT::i16;
6636 NeedTrunc = true;
6637 }
6638
6639 unsigned Opcode = 0;
6641
6642 switch (NumElts) {
6643 default:
6644 return;
6645 case 2:
6647 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
6648 break;
6649 case 4: {
6651 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
6652 LdResVTs = DAG.getVTList(ListVTs);
6653 break;
6654 }
6655 }
6656
6658
6659
6660
6661 OtherOps.push_back(Chain);
6662
6663
6664 OtherOps.append(N->op_begin() + 2, N->op_end());
6665
6667
6671
6673
6674 for (unsigned i = 0; i < NumElts; ++i) {
6676 if (NeedTrunc)
6677 Res =
6680 }
6681
6683
6686
6687 Results.push_back(BuildVec);
6688 Results.push_back(LoadChain);
6689 } else {
6690
6692 "Custom handling of non-i8 ldu/ldg?");
6693
6694
6696
6697
6699
6701
6702
6703
6707
6711 }
6712 return;
6713 }
6714
6715 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
6716 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
6717 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
6718 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
6719 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
6720 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
6721 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
6722 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
6723 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
6724 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
6725 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
6726 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
6727 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
6728 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
6729 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
6730 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
6731 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
6732 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
6733 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
6734 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
6735 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
6736 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
6737 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
6738 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
6740 Results.push_back(Res->first);
6741 Results.push_back(Res->second);
6742 }
6743 return;
6744
6745 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
6746 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
6747 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
6748 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
6749 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
6750 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
6751 if (auto Res = lowerTcgen05Ld(N, DAG, true)) {
6752 Results.push_back(Res->first);
6753 Results.push_back(Res->second);
6754 }
6755 return;
6756 }
6757}
6758
6761
6762
6764 SDValue Chain = N->getOperand(0);
6766 SDValue Glue = N->getOperand(2);
6767
6768 assert(Reg.getValueType() == MVT::i128 &&
6769 "Custom lowering for CopyFromReg with 128-bit reg only");
6771 N->getValueType(2)};
6773
6777
6778 Results.push_back(Pair);
6781}
6782
6786 SDValue Chain = N->getOperand(0);
6788
6790
6793 DAG.getNode(NVPTXISD::ProxyReg, SDLoc(N), VT, {Chain, NewReg});
6795
6797}
6798
6802 assert(N->getValueType(0) == MVT::i128 &&
6803 "Custom lowering for atomic128 only supports i128");
6804
6807
6811 "Support for b128 atomics introduced in PTX ISA version 8.3 and "
6812 "requires target sm_90.",
6814
6817 return;
6818 }
6819
6823 for (const auto &Op : AN->ops().drop_front(2)) {
6824
6827
6830 }
6831 unsigned Opcode = N->getOpcode() == ISD::ATOMIC_SWAP
6838 {Result.getValue(0), Result.getValue(1)}));
6839 Results.push_back(Result.getValue(2));
6840}
6841
6842void NVPTXTargetLowering::ReplaceNodeResults(
6844 switch (N->getOpcode()) {
6845 default:
6847 case ISD::BITCAST:
6849 return;
6850 case ISD::LOAD:
6851 case ISD::MLOAD:
6853 return;
6856 return;
6859 return;
6860 case NVPTXISD::ProxyReg:
6862 return;
6863 case ISD::ATOMIC_CMP_SWAP:
6864 case ISD::ATOMIC_SWAP:
6866 return;
6867 }
6868}
6869
6873
6876 if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&
6877 STI.getPTXVersion() >= 63)
6879 if (Ty->isBFloatTy() && STI.getSmVersion() >= 90 &&
6880 STI.getPTXVersion() >= 78)
6882 if (Ty->isFloatTy())
6884 if (Ty->isDoubleTy() && STI.hasAtomAddF64())
6886 }
6888 }
6889
6890 assert(Ty->isIntegerTy() && "Ty should be integer at this point");
6892
6894 default:
6899 [[fallthrough]];
6904 case 8:
6905 case 16:
6907 case 32:
6909 case 64:
6910 if (STI.hasAtomBitwise64())
6913 case 128:
6915 default:
6917 }
6925 case 8:
6926 case 16:
6928 case 32:
6930 case 64:
6931 if (STI.hasAtomMinMax64())
6934 case 128:
6936 default:
6938 }
6942 case 32:
6944 case 8:
6945 case 16:
6946 case 64:
6947 case 128:
6949 default:
6951 }
6952 }
6953
6955}
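
Illustrative sketch, not part of the original file: when the logic above returns the CmpXChg expansion kind (for example a wide integer min/max on a subtarget without the matching atom instruction), the AtomicExpand pass rewrites the RMW as a compare-and-swap loop. A rough host-side equivalent, using std::atomic purely for readability; the function name is invented.

#include <algorithm>
#include <atomic>
#include <cstdint>

uint64_t atomic_umin(std::atomic<uint64_t> &loc, uint64_t val) {
  uint64_t old = loc.load();
  // compare_exchange_weak reloads 'old' with the current value on failure.
  while (!loc.compare_exchange_weak(old, std::min(old, val))) {
  }
  return old; // like atomicrmw, yields the original value
}
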
6956
6960
6961
6962
6963
6964
6965
6966
6967
6968 return CI &&
6969 (cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() <
6970 STI.getMinCmpXchgSizeInBits() ||
6972}
6973
6977 bool BitwidthSupportedAndIsSeqCst =
6979 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() >=
6980 STI.getMinCmpXchgSizeInBits();
6983}
6984
6990
6991
6992
6996 ? Ord
6998 SSID);
6999
7000 return nullptr;
7001}
7002
7006
7009
7011 auto CASWidth =
7012 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth();
7014
7017 CASWidth < STI.getMinCmpXchgSizeInBits()))
7019
7020 return nullptr;
7021}
7022
7023
7024
7025
7026
7028 EVT ToVT) const {
7030 return Op;
7031 switch (Op) {
7035 break;
7039 break;
7040 case ISD::VP_FP_TO_UINT:
7042 return ISD::VP_FP_TO_SINT;
7043 break;
7044 default:
7045 break;
7046 }
7047 return Op;
7048}
7049
7050
7052
7057
7063 unsigned Mode = Op.getConstantOperandVal(3);
7064
7065 if (!Selector)
7066 return;
7067
7070
7071
7073 "PRMT must have i32 operands");
7074 assert(Known.getBitWidth() == 32 && "PRMT must have i32 result");
7076
7082 KnownBits Byte = BitField.extractBits(8, Idx * 8);
7083 if (Sign)
7086 }
7087}
7088
7091
7092
7093 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
7095 return;
7096
7097
7098 auto DestVT = LD->getValueType(0);
7099 if (DestVT.isVector())
7100 return;
7101
7105}
7106
7111
7112 switch (Op.getOpcode()) {
7113 case NVPTXISD::PRMT:
7115 break;
7120 break;
7121 default:
7122 break;
7123 }
7124}
7125
7130
7132 if (DemandedBits.extractBits(8, I * 8).isZero())
7133 continue;
7134
7138
7139 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;
7140 unsigned ByteStart = (Idx % 4) * 8;
7141 if (Sign)
7142 Src.setBit(ByteStart + 7);
7143 else
7144 Src.setBits(ByteStart, ByteStart + 8);
7145 }
7146
7147 return {DemandedLHS, DemandedRHS};
7148}
7149
7150
7151
7153 if (!Op)
7155 if (Op.isUndef())
7157 return Op;
7158}
7159
7164 unsigned Depth) {
7169 if (!SelectorConst)
7171
7174
7175
7176
7177 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;
7178 const unsigned SelBits = (4 - LeadingBytes) * 4;
7179 if (Selector.getLoBits(SelBits) == APInt(32, 0x3210).getLoBits(SelBits))
7180 return Op0;
7181 if (Selector.getLoBits(SelBits) == APInt(32, 0x7654).getLoBits(SelBits))
7182 return Op1;
7183
7185
7186
7191
7194 if ((DemandedOp0 && DemandedOp0 != Op0) ||
7195 (DemandedOp1 && DemandedOp1 != Op1)) {
7196 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
7197 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
7199 }
7200
7202}
7203
7208
7209 switch (Op.getOpcode()) {
7210 case NVPTXISD::PRMT:
7212 *this, Depth)) {
7214 return true;
7215 }
7216 break;
7217 default:
7218 break;
7219 }
7220
7222 return false;
7223}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains the declarations of entities that describe floating point environment and related ...
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
Register const TargetRegisterInfo * TRI
NVPTX address space definition.
static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)
Definition NVPTXISelLowering.cpp:1313
static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Definition NVPTXISelLowering.cpp:6362
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))
static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2551
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Definition NVPTXISelLowering.cpp:6216
static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))
static bool isConstOne(const SDValue &Operand)
Definition NVPTXISelLowering.cpp:6075
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
Definition NVPTXISelLowering.cpp:155
static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:3630
static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2749
static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)
Definition NVPTXISelLowering.cpp:1322
static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:3069
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...
Definition NVPTXISelLowering.cpp:302
static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
Definition NVPTXISelLowering.cpp:6585
static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)
Definition NVPTXISelLowering.cpp:6799
static unsigned getMinMax3Opcode(unsigned MinMax2Opcode)
Get 3-input version of a 2-input min/max opcode.
Definition NVPTXISelLowering.cpp:5821
static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)
Definition NVPTXISelLowering.cpp:3701
static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
Definition NVPTXISelLowering.cpp:3619
static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)
Definition NVPTXISelLowering.cpp:6783
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
Definition NVPTXISelLowering.cpp:6759
static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:3010
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
Definition NVPTXISelLowering.cpp:6093
static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
Definition NVPTXISelLowering.cpp:6433
static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)
Fold packing movs into a store.
Definition NVPTXISelLowering.cpp:5668
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
Definition NVPTXISelLowering.cpp:6607
static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)
Definition NVPTXISelLowering.cpp:368
static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:351
static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)
Definition NVPTXISelLowering.cpp:426
static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)
Definition NVPTXISelLowering.cpp:342
static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2974
static SDValue PerformFMinMaxCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned PTXVersion, unsigned SmVersion)
PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into (fmaxnum3 a, b, c).
Definition NVPTXISelLowering.cpp:5840
static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
Definition NVPTXISelLowering.cpp:5902
static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
Definition NVPTXISelLowering.cpp:5463
static std::optional< unsigned > getScalar3OpcodeForReduction(unsigned ReductionOpcode)
Get 3-input scalar reduction opcode.
Definition NVPTXISelLowering.cpp:1950
static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2953
static bool isConstZero(const SDValue &Operand)
Definition NVPTXISelLowering.cpp:5413
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2531
static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2675
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
Definition NVPTXISelLowering.cpp:5946
static unsigned getTcgen05MMADisableOutputLane(unsigned IID)
Definition NVPTXISelLowering.cpp:2615
static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)
Definition NVPTXISelLowering.cpp:7126
static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)
Definition NVPTXISelLowering.cpp:6414
static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)
Definition NVPTXISelLowering.cpp:1933
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
Definition NVPTXISelLowering.cpp:5870
static SDValue lowerBSWAP(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2577
static SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:3137
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
Definition NVPTXISelLowering.cpp:6134
static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)
Definition NVPTXISelLowering.cpp:7058
static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Fold unpacking movs into a load by increasing the number of return values.
Definition NVPTXISelLowering.cpp:5554
static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2829
static std::optional< std::pair< SDValue, SDValue > > lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset=false)
Definition NVPTXISelLowering.cpp:2706
static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2869
static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
replaceLoadVector - Convert vector loads into multi-output scalar loads.
Definition NVPTXISelLowering.cpp:3509
static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:3020
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
Definition NVPTXISelLowering.cpp:5973
static std::pair< MemSDNode *, uint32_t > convertMLOADToLoadWithUsedBytesMask(MemSDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
Definition NVPTXISelLowering.cpp:3456
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
Definition NVPTXISelLowering.cpp:6009
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2924
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
Definition NVPTXISelLowering.cpp:6104
static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)
Reduces the elements using the scalar operations provided.
Definition NVPTXISelLowering.cpp:1886
static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Definition NVPTXISelLowering.cpp:6515
static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)
Definition NVPTXISelLowering.cpp:480
static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)
Definition NVPTXISelLowering.cpp:1866
static SDValue matchMADConstOnePattern(SDValue Add)
Definition NVPTXISelLowering.cpp:6080
static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
Definition NVPTXISelLowering.cpp:1354
static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)
Definition NVPTXISelLowering.cpp:1346
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)
Definition NVPTXISelLowering.cpp:7089
static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)
Definition NVPTXISelLowering.cpp:6381
static EVT promoteScalarIntegerPTX(const EVT VT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
Definition NVPTXISelLowering.cpp:392
static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)
Definition NVPTXISelLowering.cpp:7160
static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:3075
static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:7152
static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)
Definition NVPTXISelLowering.cpp:6459
static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:3064
static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:2415
OperandSignedness
Definition NVPTXISelLowering.cpp:5937
@ Unknown
Definition NVPTXISelLowering.cpp:5940
@ Unsigned
Definition NVPTXISelLowering.cpp:5939
@ Signed
Definition NVPTXISelLowering.cpp:5938
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
Definition NVPTXISelLowering.cpp:6189
static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)
Definition NVPTXISelLowering.cpp:200
static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))
static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))
Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....
static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)
Definition NVPTXISelLowering.cpp:3103
static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
Definition NVPTXISelLowering.cpp:5763
static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
Definition NVPTXISelLowering.cpp:5748
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
Definition NVPTXISelLowering.cpp:6177
MachineInstr unsigned OpIdx
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
static const fltSemantics & IEEEsingle()
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool slt(const APInt &RHS) const
Signed less than comparison.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
This is an SDNode representing atomic operations.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
FunctionType * getFunctionType() const
const APInt & getAPIntValue() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for unsupported feature in backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Common base class shared among various IRBuilders.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
static constexpr unsigned NoRegister
Instances of this class represent a uniqued identifier for a section in the current translation unit.
StringRef getName() const
getName - Get the symbol name.
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto fp_fixedlen_vector_valuetypes()
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)
bool hasUsedBytesMaskPragma() const
bool hasAtomSwap128() const
bool hasF32x2Instructions() const
bool has256BitVectorLoadStore(unsigned AS) const
AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override
Definition NVPTXISelLowering.cpp:6974
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
Definition NVPTXISelLowering.cpp:5342
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
Definition NVPTXISelLowering.cpp:3218
const NVPTXTargetMachine * nvTM
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
Definition NVPTXISelLowering.cpp:4087
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
Definition NVPTXISelLowering.cpp:7204
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
Definition NVPTXISelLowering.cpp:515
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const
Definition NVPTXISelLowering.cpp:1172
unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override
Definition NVPTXISelLowering.cpp:7027
bool useF32FTZ(const MachineFunction &MF) const
Definition NVPTXISelLowering.cpp:150
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
Definition NVPTXISelLowering.cpp:1820
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const
Definition NVPTXISelLowering.cpp:1276
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
Definition NVPTXISelLowering.cpp:1123
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
Definition NVPTXISelLowering.cpp:4010
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
Definition NVPTXISelLowering.cpp:3890
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
Definition NVPTXISelLowering.cpp:4074
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Definition NVPTXISelLowering.cpp:1798
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Definition NVPTXISelLowering.cpp:7003
std::string getParamName(const Function *F, int Idx) const
Definition NVPTXISelLowering.cpp:5282
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
Definition NVPTXISelLowering.cpp:1116
NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const
Definition NVPTXISelLowering.cpp:123
bool shouldInsertFencesForAtomic(const Instruction *) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Definition NVPTXISelLowering.cpp:6957
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const
getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...
Definition NVPTXISelLowering.cpp:5235
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
Definition NVPTXISelLowering.cpp:1758
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Definition NVPTXISelLowering.cpp:5364
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Definition NVPTXISelLowering.cpp:5301
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
Definition NVPTXISelLowering.cpp:6985
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Definition NVPTXISelLowering.cpp:6871
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const
Helper for computing alignment of a device function byval parameter.
Definition NVPTXISelLowering.cpp:5256
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
Definition NVPTXISelLowering.cpp:5396
bool usePrecSqrtF32(const SDNode *N=nullptr) const
Definition NVPTXISelLowering.cpp:136
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
Definition NVPTXISelLowering.cpp:3364
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
Definition NVPTXISelLowering.cpp:1369
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
Definition NVPTXISelLowering.cpp:7107
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
Definition NVPTXISelLowering.cpp:7053
~NVPTXTargetObjectFile() override
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
iterator_range< user_iterator > users()
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
SectionKind - This is a simple POD value that classifies the properties of a section.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrNegativeOneBooleanContent
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
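The TargetLoweringBase hooks above are the knobs a target's lowering constructor flips before any DAG lowering runs. A minimal sketch of the usual constructor pattern; the register class name MyTarget::GPR32RegClass and the subtarget accessor are placeholders, not names from this file:

// Sketch of a target-lowering constructor body (names are placeholders).
addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);   // i32 lives in 32-bit regs
setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);   // no native select_cc
setTruncStoreAction(MVT::f32, MVT::f16, Expand);        // no f32->f16 truncating store
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
setBooleanContents(ZeroOrNegativeOneBooleanContent);
setSchedulingPreference(Sched::RegPressure);
setMaxAtomicSizeInBitsSupported(64);
setMinCmpXchgSizeInBits(32);
setTargetDAGCombine({ISD::ADD, ISD::AND});              // ask for custom combines
// Must come last, after every register class has been added.
computeRegisterProperties(Subtarget.getRegisterInfo());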
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
MCSymbol * getSymbol(const GlobalValue *GV) const
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
Type * getType() const
All values are typed, get the type of this value.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt pow(const APInt &X, int64_t N)
Compute X^N for N>=0.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ POISON
POISON - A poison node.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SIGN_EXTEND
Conversion operators.
@ SSUBO
Same for subtraction.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
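The ISD opcodes listed above are the target-independent node kinds this file pattern-matches in its custom lowering and DAG combines. As a hedged illustration of how such checks read (not code from this file), ISD::allOperandsUndef pairs naturally with a fold to UNDEF:

// Sketch: fold a BUILD_VECTOR whose operands are all undef into one UNDEF node.
static SDValue foldAllUndefBuildVector(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::BUILD_VECTOR && ISD::allOperandsUndef(N))
    return DAG.getUNDEF(N->getValueType(0));
  return SDValue();  // no change; the caller keeps the original node
}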
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ ADDRESS_SPACE_SHARED_CLUSTER
@ ATOMIC_CMP_SWAP_B128
These nodes are used to lower atomic instructions with i128 type.
bool isPackedVectorTy(EVT VT)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
FunctionAddr VTableAddr Value
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
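ComputeValueVTs is how parameter and return types get flattened into the EVT/offset pairs that the lowering code iterates over. A hedged sketch of a call site, assuming TLI, DL, and the IR type Ty are already in scope:

SmallVector<EVT, 8> ValueVTs;
SmallVector<TypeSize, 8> Offsets;
// Flatten an aggregate IR type into its scalar pieces plus byte offsets.
ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, &Offsets);
for (const auto &[VT, Off] : zip(ValueVTs, Offsets))
  // Each flattened piece is later mapped to a register or a stack offset.
  dbgs() << VT.getEVTString() << " @ " << Off.getFixedValue() << "\n";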
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast - Return the argument parameter cast to the specified type.
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
bool isReleaseOrStronger(AtomicOrdering AO)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
auto reverse(ContainerTy &&C)
unsigned promoteScalarArgumentSize(unsigned size)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool shouldPassAsArray(Type *Ty)
CodeGenOptLevel
Code generation optimization level.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa - Return true if the parameter to the template is an instance of one of the template type argu...
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
decltype(auto) cast(const From &Val)
cast - Return the argument parameter cast to the specified type.
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
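alignTo and commonAlignment are the arithmetic behind parameter-space layout: sizes are rounded up to the slot alignment, and a member's effective alignment is the base alignment reduced by its offset. A tiny worked sketch:

// Round an 18-byte object up to a 4-byte slot: alignTo(18, Align(4)) == 20.
uint64_t SlotBytes = alignTo(18, Align(4));
// A field 12 bytes into a 16-byte-aligned object is only 4-byte aligned:
// commonAlignment(Align(16), 12) == Align(4).
Align FieldAlign = commonAlignment(Align(16), 12);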
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has fewer bits than VT.
ElementCount getVectorElementCount() const
bool is32BitVector() const
Return true if this is a 32-bit vector type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
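The EVT queries above are what the vector-splitting logic uses to break an illegal vector into register-sized pieces. A hedged sketch (the helper name and the split factor of 2 are illustrative):

// Split a wide vector type in half, e.g. v8f32 -> two v4f32 pieces.
static EVT getHalfVT(EVT VT, LLVMContext &Ctx) {
  assert(VT.isVector() && VT.getVectorNumElements() % 2 == 0);
  return EVT::getVectorVT(Ctx, VT.getVectorElementType(),
                          VT.getVectorNumElements() / 2);
}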
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
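computeKnownBits and KnownBits are the machinery behind this file's demanded-bits style combines. A hedged sketch, assuming DAG is a SelectionDAG and Hi/Lo are i32 SDValues already in scope: concatenating the known bits of two halves into a 64-bit result.

KnownBits KHi = DAG.computeKnownBits(Hi);   // known bits of the high half
KnownBits KLo = DAG.computeKnownBits(Lo);   // known bits of the low half
KnownBits K64 = KHi.concat(KLo);            // Hi in the top 32 bits, Lo in the bottom
assert(K64.getBitWidth() == 64);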
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasAllowContract() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
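CallLoweringInfo is the bundle a LowerCall implementation receives: Outs/OutVals describe the already-split outgoing argument pieces and Ins describes the expected return pieces. A minimal hedged sketch of the usual iteration pattern (not this file's actual call lowering):

// Sketch: walk the outgoing argument pieces of a call being lowered.
SDValue lowerCallSketch(TargetLowering::CallLoweringInfo &CLI) {
  for (const auto &[Arg, Val] : zip(CLI.Outs, CLI.OutVals))
    // Arg.VT is the piece's value type, Arg.Flags carries byval/sret/etc.,
    // and Val is the SDValue holding the piece itself.
    dbgs() << "arg piece: " << Arg.VT.getEVTString() << " node "
           << Val.getNode() << "\n";
  return CLI.Chain;  // a real implementation returns the lowered call's chain
}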
bool isAfterLegalizeDAG() const
bool isBeforeLegalize() const
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)