LLVM: lib/Target/WebAssembly/WebAssemblyISelLowering.cpp Source File
//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//
#include "llvm/IR/IntrinsicsWebAssembly.h"

using namespace llvm;

#define DEBUG_TYPE "wasm-lower"
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);

  if (Subtarget->hasSIMD128()) {
  }
  if (Subtarget->hasFP16()) {
  }
  if (Subtarget->hasReferenceTypes()) {
    addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
    addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
    if (Subtarget->hasExceptionHandling()) {
    }
  }

  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
  }
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64}) {
    }
  }
  if (Subtarget->hasFP16()) {
  }
  if (Subtarget->hasReferenceTypes()) {
    for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
    }
  }
  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64, MVT::v8f16}) {
    if (!Subtarget->hasFP16() && T == MVT::v8f16) {
      continue;
    }

    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})

    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FRINT, ISD::FROUNDEVEN})

    if (T != MVT::v8f16) {
    }
  }

  for (auto Op :
    for (auto T : {MVT::i32, MVT::i64})
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
  }

  if (Subtarget->hasWideArithmetic()) {
  }

  if (Subtarget->hasNontrappingFPToInt())
    for (auto T : {MVT::i32, MVT::i64})

  if (Subtarget->hasRelaxedSIMD()) {
        {ISD::FMINNUM, ISD::FMINIMUMNUM, ISD::FMAXNUM, ISD::FMAXIMUMNUM},
        {MVT::v4f32, MVT::v2f64}, Legal);
  }
  if (Subtarget->hasSIMD128()) {

    for (auto T : {MVT::v16i8, MVT::v8i16})

    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})

    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})

    if (Subtarget->hasFP16())

    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})

    if (Subtarget->hasFP16())

    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})

    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})

    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})

    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})

    for (auto Op :
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})

      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})

    for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})

    for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2, ISD::FEXP10})
      for (auto T : {MVT::v4f32, MVT::v2f64})

    for (auto Op :
      for (auto T : {MVT::v2i64, MVT::v2f64})

    if (Subtarget->hasFP16()) {
    }
  }

  }

  if (Subtarget->hasFP16()) {
  }

  if (Subtarget->hasRelaxedSIMD()) {
  }

  for (auto Op : {ISD::PARTIAL_REDUCE_SMLA, ISD::PARTIAL_REDUCE_UMLA}) {
  }
  }
  if (!Subtarget->hasSignExt()) {
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
  }

  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})

  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
    }
  }
  }

  }
  }

}

    return MVT::externref;
    return MVT::funcref;
}

    return MVT::externref;
    return MVT::funcref;
}
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {

  default:
    break;
  }
}

bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {

    return false;

    return true;

}

FastISel *WebAssemblyTargetLowering::createFastISel(
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {

           "32-bit shift counts ought to be enough for anyone");
  }

         "Unable to represent scalar shift amount type");
}
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;

  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();

  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
  }

  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    CmpReg = AndReg;
  }

  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)

  return DoneMBB;
}
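// A hedged standalone sketch (not from this file; the function name is
// illustrative) of the scalar semantics the CFG above implements for the
// signed i32-from-f32 case: guard the trapping truncation with an ABS/LT
// range check and produce the fixed Substitute value (0 for unsigned,
// INT_MIN for signed) on the out-of-range or NaN path.
#include <cmath>
#include <cstdint>

int32_t truncSatSignedI32(float X) {
  const double Limit = -(double)INT32_MIN;  // 2^31, same as CmpVal above
  if (std::fabs((double)X) < Limit)         // mirrors the ABS_F32 + LT_F32 test
    return (int32_t)X;                      // stands in for I32_TRUNC_S_F32
  return INT32_MIN;                         // Substitute on the false path
}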
static MachineBasicBlock *LowerMemcpy(MachineInstr &MI, DebugLoc DL,
                                      MachineBasicBlock *BB,
                                      const TargetInstrInfo &TII, bool Int64) {

  if (Def->getOpcode() == WebAssembly::CONST_I32 ||
      Def->getOpcode() == WebAssembly::CONST_I64) {
    if (Def->getOperand(1).getImm() == 0) {
      MI.eraseFromParent();
      return BB;
    }

    unsigned MemoryCopy =
        Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
        .add(DstMem)
        .add(SrcMem)
        .add(Dst)
        .add(Src)
        .add(Len);
    MI.eraseFromParent();
    return BB;
  }
  }

  unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
  unsigned MemoryCopy =
      Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;

  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());

  unsigned EqzReg;
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);

  MI.eraseFromParent();

      .add(DstMem)
      .add(SrcMem)
      .add(Dst)
      .add(Src)
      .add(Len);

  return DoneMBB;
}
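// Why the zero-length guard: LLVM's memcpy intrinsic must tolerate Len == 0
// with arbitrary pointers, while wasm's memory.copy can trap on out-of-bounds
// regions even when the length is zero. A hedged scalar sketch of the emitted
// control flow (the loweredMemcpy name is illustrative):
#include <cstddef>
#include <cstring>

void loweredMemcpy(unsigned char *Dst, const unsigned char *Src, size_t Len) {
  if (Len == 0)                  // the EQZ + conditional branch around the copy
    return;
  std::memmove(Dst, Src, Len);   // stands in for the memory.copy instruction
}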
static MachineBasicBlock *LowerMemset(MachineInstr &MI, DebugLoc DL,
                                      MachineBasicBlock *BB,
                                      const TargetInstrInfo &TII, bool Int64) {

  if (Def->getOpcode() == WebAssembly::CONST_I32 ||
      Def->getOpcode() == WebAssembly::CONST_I64) {
    if (Def->getOperand(1).getImm() == 0) {
      MI.eraseFromParent();
      return BB;
    }

    unsigned MemoryFill =
        Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
        .add(Mem)
        .add(Dst)
        .add(Val)
        .add(Len);
    MI.eraseFromParent();
    return BB;
  }
  }

  unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
  unsigned MemoryFill =
      Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;

  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());

  unsigned EqzReg;
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);

  MI.eraseFromParent();

  return DoneMBB;
}
static MachineBasicBlock *
LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
                 const WebAssemblySubtarget *Subtarget,
                 const TargetInstrInfo &TII) {
  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
         CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);

  bool IsIndirect =
  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;

  bool IsFuncrefCall = false;
    IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
  }

  unsigned CallOp;
  if (IsIndirect && IsRetCall) {
    CallOp = WebAssembly::RET_CALL_INDIRECT;
  } else if (IsIndirect) {
    CallOp = WebAssembly::CALL_INDIRECT;
  } else if (IsRetCall) {
    CallOp = WebAssembly::RET_CALL;
  } else {
    CallOp = WebAssembly::CALL;
  }

  if (IsIndirect) {
    auto FnPtr = CallParams.getOperand(0);

    if (IsFuncrefCall) {
    } else
  }

  for (auto Def : CallResults.defs())
    MIB.add(Def);

  if (IsIndirect) {

  } else {

  }
  }

  for (auto Use : CallParams.uses())

  if (IsIndirect && IsFuncrefCall) {

    BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);

    BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
  }

  return BB;
}
MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();

  switch (MI.getOpcode()) {
  default:
  case WebAssembly::FP_TO_SINT_I32_F32:
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
                        WebAssembly::I64_TRUNC_U_F64);
  case WebAssembly::MEMCPY_A32:
  case WebAssembly::MEMCPY_A64:
  case WebAssembly::MEMSET_A32:
  case WebAssembly::MEMSET_A64:
  case WebAssembly::CALL_RESULTS:
  case WebAssembly::RET_CALL_RESULTS:
  }
}
std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
          return std::make_pair(0U, &WebAssembly::I32RegClass);
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
        case 32:
          return std::make_pair(0U, &WebAssembly::F32RegClass);
        case 64:
          return std::make_pair(0U, &WebAssembly::F64RegClass);
        default:
          break;
        }
      }
      break;
    default:
      break;
    }
  }

}
bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      Type *Ty, unsigned AS,

  if (AM.BaseOffs < 0)
    return false;

  if (AM.Scale != 0)
    return false;

  return true;
}
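// A self-contained sketch of the rule encoded above (the struct is an
// illustrative stand-in for TargetLowering::AddrMode, which carries more
// fields in LLVM proper): wasm folds only base + unsigned immediate offset,
// so negative offsets and scaled-index forms must be rejected.
#include <cstdint>

struct AddrModeSketch {
  int64_t BaseOffs = 0; // constant offset folded into the access
  int64_t Scale = 0;    // reg * Scale addressing component
};

bool isLegalWasmAddrMode(const AddrModeSketch &AM) {
  if (AM.BaseOffs < 0) // wasm load/store offsets are unsigned immediates
    return false;
  if (AM.Scale != 0)   // no scaled-register addressing in wasm
    return false;
  return true;
}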
bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(

  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  return true;
}

bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
         (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
         (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
}
bool WebAssemblyTargetLowering::isOffsetFoldingLegal(

  const GlobalValue *GV = GA->getGlobal();
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  EVT VT) const {

}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;

    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    return true;
  case Intrinsic::wasm_loadf16_f32:
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    return true;
  case Intrinsic::wasm_storef16_f32:
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    return true;
  default:
    return false;
  }
}
void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
  switch (Op.getOpcode()) {
  default:
    break;
    unsigned IntNo = Op.getConstantOperandVal(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::wasm_bitmask: {
      EVT VT = Op.getOperand(1).getSimpleValueType();
      Known.Zero |= ZeroMask;
      break;
    }
    }
    break;
  }
  case WebAssemblyISD::EXTEND_LOW_U:
  case WebAssemblyISD::EXTEND_HIGH_U: {
    SDValue SrcOp = Op.getOperand(0);
    if (VT == MVT::v8i8 || VT == MVT::v16i8) {
    } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
    } else if (VT == MVT::v2i32 || VT == MVT::v4i32) {
    }
    break;
  }

  case WebAssemblyISD::I64_ADD128:
    if (Op.getResNo() == 1) {
      SDValue LHS_HI = Op.getOperand(1);
      SDValue RHS_HI = Op.getOperand(3);
    }
    break;
  }
}
WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {

  if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
      EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
  }

}

bool WebAssemblyTargetLowering::isFMAFasterThanFMulAndFAdd(
  if (!Subtarget->hasFP16() || !VT.isVector())
    return false;

    return false;

}

bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
    SDValue Op, const TargetLoweringOpt &TLO) const {

  if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
    return false;
  return true;
}
static bool callingConvSupported(CallingConv::ID CallConv) {

}
SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;

       "WebAssembly doesn't support language-specific or target-specific "
       "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    Type *RetTy = F.getReturnType();
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();

        while (true) {
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

      Outs[0].Flags.isSRet()) {
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
                                   false);
      Chain = DAG.getMemcpy(Chain, DL, FINode, OutVal, SizeNode,
                            false, false,
                            nullptr, std::nullopt, MachinePointerInfo(),
                            MachinePointerInfo());
      OutVal = FINode;
    }

  }

  bool IsVarArg = CLI.IsVarArg;

  if (!HasSwiftSelfArg) {
    NumFixedArgs++;
    ISD::ArgFlagsTy Flags;
    Flags.setSwiftSelf();
    ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
    CLI.Outs.push_back(Arg);
    CLI.OutVals.push_back(ArgVal);
  }
  if (!HasSwiftErrorArg) {
    NumFixedArgs++;
    ISD::ArgFlagsTy Flags;
    Flags.setSwiftError();
    ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
    CLI.Outs.push_back(Arg);
    CLI.OutVals.push_back(ArgVal);
  }
  }
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Align Alignment =
      CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  if (IsVarArg && NumBytes) {
    MaybeAlign StackAlign = Layout.getStackAlignment();
    assert(StackAlign && "data layout string is missing stack alignment");
                  false);
    unsigned ValNo = 0;
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
    }
    if (!Chains.empty())
  } else if (IsVarArg) {
  }

  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

                 IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());

  if (IsVarArg)
    Ops.push_back(FINode);

  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
  }

      CLI.CB->getCalledOperand()->getType())) {

    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        MachinePointerInfo(
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),

    Ops[0] = TableSet;
  }

  if (CLI.IsTailCall) {
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  }

  SDVTList InTyList = DAG.getVTList(InTys);

  for (size_t I = 0; I < Ins.size(); ++I)

  return Res.getValue(Ins.size());
}
bool WebAssemblyTargetLowering::CanLowerReturn(
    const Type *RetTy) const {

}

SDValue WebAssemblyTargetLowering::LowerReturn(
         "MVP WebAssembly can only return up to one value");
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}
SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");

                                           DL, MVT::i32))

    MFI->addParam(In.VT);
  }

  if (!HasSwiftSelfArg) {
    MFI->addParam(PtrVT);
  }
  if (!HasSwiftErrorArg) {
    MFI->addParam(PtrVT);
  }
  }

  if (IsVarArg) {
    MFI->setVarargBufferVreg(VarargVreg);
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
    MFI->addParam(PtrVT);
  }

    MFI->addResult(VT);

  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}
void WebAssemblyTargetLowering::ReplaceNodeResults(
  switch (N->getOpcode()) {

    break;

    break;
    Results.push_back(Replace128Op(N, DAG));
    break;
  default:
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}
SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
  switch (Op.getOpcode()) {
  default:
    return LowerFrameIndex(Op, DAG);
    return LowerGlobalAddress(Op, DAG);
    return LowerGlobalTLSAddress(Op, DAG);
    return LowerExternalSymbol(Op, DAG);
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return LowerRETURNADDR(Op, DAG);
    return LowerFRAMEADDR(Op, DAG);
    return LowerCopyToReg(Op, DAG);
    return LowerAccessVectorElement(Op, DAG);
    return LowerIntrinsic(Op, DAG);
    return LowerSIGN_EXTEND_INREG(Op, DAG);
    return LowerEXTEND_VECTOR_INREG(Op, DAG);
    return LowerBUILD_VECTOR(Op, DAG);
    return LowerVECTOR_SHUFFLE(Op, DAG);
    return LowerSETCC(Op, DAG);
    return LowerShift(Op, DAG);
    return LowerFP_TO_INT_SAT(Op, DAG);
  case ISD::LOAD:
    return LowerLoad(Op, DAG);
  case ISD::STORE:
    return LowerStore(Op, DAG);
    return LowerMUL_LOHI(Op, DAG);
    return LowerUADDO(Op, DAG);
  }
}
static bool IsWebAssemblyGlobal(SDValue Op) {
  return false;
}

static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
                                                  SelectionDAG &DAG) {
  if (!FI)
    return std::nullopt;

}
SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
                                              SelectionDAG &DAG) const {

  if (!Offset->isUndef())
    report_fatal_error("unexpected offset when storing to webassembly global",
                       false);

    SDVTList Tys = DAG.getVTList(MVT::Other);
  }

  if (!Offset->isUndef())
    report_fatal_error("unexpected offset when storing to webassembly local",
                       false);

    SDVTList Tys = DAG.getVTList(MVT::Other);
    return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
  }

      "Encountered an unlowerable store to the wasm_var address space",
      false);

  return Op;
}
SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
                                             SelectionDAG &DAG) const {

  if (!Offset->isUndef())
    report_fatal_error(
        "unexpected offset when loading from webassembly global", false);

  }

  if (!Offset->isUndef())
    report_fatal_error(
        "unexpected offset when loading from webassembly local", false);

    return DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, {LocalVT, MVT::Other},
  }

      "Encountered an unlowerable load from the wasm_var address space",
      false);

  return Op;
}
SDValue WebAssemblyTargetLowering::LowerMUL_LOHI(SDValue Op,
                                                 SelectionDAG &DAG) const {
  assert(Subtarget->hasWideArithmetic());
  assert(Op.getValueType() == MVT::i64);
  unsigned Opcode;
  switch (Op.getOpcode()) {
    Opcode = WebAssemblyISD::I64_MUL_WIDE_U;
    break;
    Opcode = WebAssemblyISD::I64_MUL_WIDE_S;
    break;
  default:
  }
}
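// MUL_LOHI asks for both halves of the full 64x64 -> 128-bit product, which
// the wide-arithmetic proposal's i64.mul_wide_{s,u} instruction produces in
// one step. A hedged standalone model of the unsigned case (assumes the
// common __int128 compiler extension; mulWideU is an illustrative name):
#include <cstdint>
#include <utility>

std::pair<uint64_t, uint64_t> mulWideU(uint64_t A, uint64_t B) {
  unsigned __int128 P = (unsigned __int128)A * B; // one wide multiply
  return {(uint64_t)P, (uint64_t)(P >> 64)};      // {low half, high half}
}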
SDValue WebAssemblyTargetLowering::LowerUADDO(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(Subtarget->hasWideArithmetic());
  assert(Op.getValueType() == MVT::i64);
      DAG.getNode(WebAssemblyISD::I64_ADD128, DL,
                  DAG.getVTList(MVT::i64, MVT::i64), LHS, Zero, RHS, Zero);
}

SDValue WebAssemblyTargetLowering::Replace128Op(SDNode *N,
  assert(Subtarget->hasWideArithmetic());
  assert(N->getValueType(0) == MVT::i128);
  unsigned Opcode;
  switch (N->getOpcode()) {
    Opcode = WebAssemblyISD::I64_ADD128;
    break;
    Opcode = WebAssemblyISD::I64_SUB128;
    break;
  default:
  }

                 LHS_0, LHS_1, RHS_0, RHS_1);
}
SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,

  SDValue Chain = Op.getOperand(0);
  EVT VT = Src.getValueType();
                                   : WebAssembly::COPY_I64,
                               DL, VT, Src),
        0);
    return Op.getNode()->getNumValues() == 1
  }
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
}

SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,

  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
         "Non-Emscripten WebAssembly hasn't implemented "
         "__builtin_return_address");
  }

  unsigned Depth = Op.getConstantOperandVal(0);
  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
                     {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
      .first;
}
SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,

  if (Op.getConstantOperandVal(0) > 0)

  EVT VT = Op.getValueType();
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
}
SDValue
WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,

  if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
    report_fatal_error("cannot use thread-local storage without bulk memory",
                       false);

  const GlobalValue *GV = GA->getGlobal();

  auto model = Subtarget->getTargetTriple().isOSEmscripten()

    auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
                                       : WebAssembly::GLOBAL_GET_I32;

        0);

        DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);

  }

  EVT VT = Op.getValueType();
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
}
SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
  EVT VT = Op.getValueType();
         "Unexpected target flags on generic GlobalAddressSDNode");
    fail(DL, DAG, "Invalid address space for WebAssembly target");

  const GlobalValue *GV = GA->getGlobal();

    const char *BaseName;
    } else {
    }
        DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,

        WebAssemblyISD::WrapperREL, DL, VT,
        OperandFlags));

  }
  }

  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     GA->getOffset(), OperandFlags));
}

SDValue
WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
}
SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,

                            JT->getTargetFlags());
}

SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  Ops.push_back(Chain);
  Ops.push_back(Index);

  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  for (auto *MBB : MBBs)

  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}

SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {

                                 MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
  unsigned IntNo;
  switch (Op.getOpcode()) {
    IntNo = Op.getConstantOperandVal(1);
    break;
    IntNo = Op.getConstantOperandVal(0);
    break;
  default:
  }

  switch (IntNo) {
  default:
    return SDValue();

  case Intrinsic::wasm_lsda: {
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
          DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
    }
    return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
  }

  case Intrinsic::wasm_shuffle: {
    size_t OpIdx = 0;
    while (OpIdx < 18) {
      } else {
      }
    }
    return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
  }

  case Intrinsic::thread_pointer: {
    auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
                                       : WebAssembly::GLOBAL_GET_I32;
        0);
  }
  }
}
SDValue
WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,

  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());

  const SDValue &Extract = Op.getOperand(0);
  MVT ExtractedLaneT =
      cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
  MVT ExtractedVecT =
  if (ExtractedVecT == VecT)
    return Op;

  unsigned IndexVal = Index->getAsZExtVal();
  unsigned Scale =
                     Op.getOperand(1));
}
static SDValue GetExtendHigh(SDValue Op, unsigned UserOpc, EVT VT,
                             SelectionDAG &DAG) {

  assert((UserOpc == WebAssemblyISD::EXTEND_LOW_U ||
          UserOpc == WebAssemblyISD::EXTEND_LOW_S) &&
         "expected extend_low");

  size_t FirstIdx = Mask.size() / 2;
  for (size_t i = 0; i < Mask.size() / 2; ++i) {
    if (Mask[i] != static_cast<int>(FirstIdx + i)) {
    }
  }

  unsigned Opc = UserOpc == WebAssemblyISD::EXTEND_LOW_S
                     ? WebAssemblyISD::EXTEND_HIGH_S
                     : WebAssemblyISD::EXTEND_HIGH_U;
  return DAG.getNode(Opc, DL, VT, Shuffle->getOperand(0));
}
SDValue
WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op,
  EVT VT = Op.getValueType();
  EVT SrcVT = Src.getValueType();

         "Unexpected extension factor.");

  if (Scale != 2 && Scale != 4 && Scale != 8)

  unsigned Ext;
  switch (Op.getOpcode()) {
  default:
    Ext = WebAssemblyISD::EXTEND_LOW_U;
    break;
    Ext = WebAssemblyISD::EXTEND_LOW_S;
    break;
  }

  if (Scale == 2) {
    if (auto ExtendHigh = GetExtendHigh(Op.getOperand(0), Ext, VT, DAG))
      return ExtendHigh;
  }

  while (Scale != 1) {
                      Ret);
    Scale /= 2;
  }
  return Ret;
}
static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
  if (Op.getValueType() != MVT::v2f64)

  auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
                             unsigned &Index) -> bool {
    switch (Op.getOpcode()) {
      Opcode = WebAssemblyISD::CONVERT_LOW_S;
      break;
      Opcode = WebAssemblyISD::CONVERT_LOW_U;
      break;
    case ISD::FP_EXTEND:
      Opcode = WebAssemblyISD::PROMOTE_LOW;
      break;
    default:
      return false;
    }

    auto ExtractVector = Op.getOperand(0);
      return false;

      return false;

    SrcVec = ExtractVector.getOperand(0);
    Index = ExtractVector.getConstantOperandVal(1);
    return true;
  };

  unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
  SDValue LHSSrcVec, RHSSrcVec;
  if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
      !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))

  if (LHSOpcode != RHSOpcode)

  MVT ExpectedSrcVT;
  switch (LHSOpcode) {
  case WebAssemblyISD::CONVERT_LOW_S:
  case WebAssemblyISD::CONVERT_LOW_U:
    ExpectedSrcVT = MVT::v4i32;
    break;
  case WebAssemblyISD::PROMOTE_LOW:
    ExpectedSrcVT = MVT::v4f32;
    break;
  }
  if (LHSSrcVec.getValueType() != ExpectedSrcVT)

  auto Src = LHSSrcVec;
  if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
        ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
        {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
  }
  return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
}
SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
  MVT VT = Op.getSimpleValueType();
  if (VT == MVT::v8f16) {

  }

    return ConvertLow;

  const EVT VecT = Op.getValueType();
  const EVT LaneT = Op.getOperand(0).getValueType();
  bool CanSwizzle = VecT == MVT::v16i8;

  };

  auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
      return Bail;
    const SDValue &SwizzleSrc = Lane->getOperand(0);
    const SDValue &IndexExt = Lane->getOperand(1);
      return Bail;
      return Bail;
    const SDValue &SwizzleIndices = Index->getOperand(0);
    if (SwizzleSrc.getValueType() != MVT::v16i8 ||
        SwizzleIndices.getValueType() != MVT::v16i8 ||
        Index->getConstantOperandVal(1) != I)
      return Bail;
    return std::make_pair(SwizzleSrc, SwizzleIndices);
  };

  auto GetShuffleSrc = [&](const SDValue &Lane) {
    if (Lane->getOperand(0).getValueType().getVectorNumElements() >
    return Lane->getOperand(0);
  };

  using ValueEntry = std::pair<SDValue, size_t>;

  using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;

  using ShuffleEntry = std::pair<SDValue, size_t>;

  auto AddCount = [](auto &Counts, const auto &Val) {
    auto CountIt =
        llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
    if (CountIt == Counts.end()) {
      Counts.emplace_back(Val, 1);
    } else {
      CountIt->second++;
    }
  };

  auto GetMostCommon = [](auto &Counts) {
    assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
    return *CommonIt;
  };

  size_t NumConstantLanes = 0;

  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
      continue;

    AddCount(SplatValueCounts, Lane);

      NumConstantLanes++;
    if (auto ShuffleSrc = GetShuffleSrc(Lane))
      AddCount(ShuffleCounts, ShuffleSrc);
    if (CanSwizzle) {
      auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
      if (SwizzleSrcs.first)
        AddCount(SwizzleCounts, SwizzleSrcs);
    }
  }

  size_t NumSplatLanes;
  std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);

  size_t NumSwizzleLanes = 0;
  if (SwizzleCounts.size())
    std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
                          NumSwizzleLanes) = GetMostCommon(SwizzleCounts);

  SDValue ShuffleSrc1, ShuffleSrc2;
  size_t NumShuffleLanes = 0;
  if (ShuffleCounts.size()) {
    std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
        [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
  }
  if (ShuffleCounts.size()) {
    size_t AdditionalShuffleLanes;
    std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
        GetMostCommon(ShuffleCounts);
    NumShuffleLanes += AdditionalShuffleLanes;
  }

  std::function<bool(size_t, const SDValue &)> IsLaneConstructed;

  if (NumSwizzleLanes >= NumShuffleLanes &&
      NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
    Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
                         SwizzleIndices);
    auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
    IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
      return Swizzled == GetSwizzleSrcs(I, Lane);
    };
  } else if (NumShuffleLanes >= NumConstantLanes &&
             NumShuffleLanes >= NumSplatLanes) {
    size_t Scale1 = 1;
    size_t Scale2 = 1;
    SDValue Src1 = ShuffleSrc1;
    SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
      size_t LaneSize =
      assert(LaneSize > DestLaneSize);
      Scale1 = LaneSize / DestLaneSize;
    }
      size_t LaneSize =
      assert(LaneSize > DestLaneSize);
      Scale2 = LaneSize / DestLaneSize;
    }

    int Mask[16];
    assert(DestLaneCount <= 16);
    for (size_t I = 0; I < DestLaneCount; ++I) {
      const SDValue &Lane = Op->getOperand(I);
      SDValue Src = GetShuffleSrc(Lane);
      if (Src == ShuffleSrc1) {
      } else if (Src && Src == ShuffleSrc2) {
      } else {
      }
    }
    ArrayRef<int> MaskRef(Mask, DestLaneCount);
    IsLaneConstructed = [&](size_t, const SDValue &Lane) {
      auto Src = GetShuffleSrc(Lane);
      return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
    };
  } else if (NumConstantLanes >= NumSplatLanes) {
    for (const SDValue &Lane : Op->op_values()) {

      uint64_t LaneBits = 128 / Lanes;
            Const->getAPIntValue().trunc(LaneBits).getZExtValue(),
            SDLoc(Lane), LaneT));
      } else {
      } else {
      }
    }
    };
  } else {
    if (NumSplatLanes == 1 && Op->getOperand(0) == SplatValue &&
        (DestLaneSize == 32 || DestLaneSize == 64)) {
    } else {
    }
    IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
      return Lane == SplatValue;
    };
  }

  assert(IsLaneConstructed);

  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
  }

}
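// The strategy above tallies, per lane, which source could produce it (a
// splat, a constant, a swizzle, or a shuffle of an existing vector), builds
// the whole vector from whichever source covers the most lanes, and then
// patches the remaining lanes individually. A hedged, simplified model of
// the AddCount/GetMostCommon counting step (mostCommon is an illustrative
// name, not from this file):
#include <cstddef>
#include <utility>
#include <vector>

template <typename T>
std::pair<T, size_t> mostCommon(const std::vector<T> &Lanes) {
  std::vector<std::pair<T, size_t>> Counts;
  for (const T &L : Lanes) {
    bool Found = false;
    for (auto &E : Counts)
      if (E.first == L) { ++E.second; Found = true; break; }
    if (!Found)
      Counts.emplace_back(L, 1);
  }
  std::pair<T, size_t> Best = Counts.front(); // assumes a non-empty input
  for (auto &E : Counts)
    if (E.second > Best.second)
      Best = E;
  return Best; // the candidate base vector and how many lanes it covers
}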
SDValue
WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
  MVT VecType = Op.getOperand(0).getSimpleValueType();

  size_t OpIdx = 0;

  for (int M : Mask) {
    for (size_t J = 0; J < LaneBytes; ++J) {
      uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J;
    }
  }

  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
}

SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
                                              SelectionDAG &DAG) const {

  assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
  const SDValue &CC = Op->getOperand(2);
  auto MakeLane = [&](unsigned I) {
                       DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
  };
                     {MakeLane(0), MakeLane(1)});
}

SDValue
WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,

        DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Ops);
  }

}
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();

  if (LaneT.bitsGE(MVT::i32))

  size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
  unsigned ShiftOpcode = Op.getOpcode();
  for (size_t i = 0; i < NumLanes; ++i) {
    SDValue MaskedShiftValue =
    SDValue ShiftedValue = ShiftedElements[i];
    if (ShiftOpcode == ISD::SRA)
        DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
  }
}
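// Lanes narrower than i32 with non-uniform shift amounts have no single wasm
// instruction, so the shift is unrolled lane-by-lane into i32 shifts; masking
// each amount to the lane width keeps the scalar shift well defined and
// matches wasm's modular shift semantics. A hedged scalar model of one i8
// lane (shlLaneI8 is an illustrative name):
#include <cstdint>

uint8_t shlLaneI8(uint8_t Value, uint32_t Amount) {
  uint32_t Masked = Amount & 7;                 // the MaskedShiftValue above
  return (uint8_t)((uint32_t)Value << Masked);  // shift performed in i32
}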
SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {

  assert(Op.getSimpleValueType().isVector());

  uint64_t LaneBits = Op.getValueType().getScalarSizeInBits();
  auto ShiftVal = Op.getOperand(1);

  auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) {
      return MaskOp;
    APInt MaskVal;

        MaskVal == MaskBits)
      MaskOp = LHS;
    } else {

      if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
        MaskOp = LHS;
    }

    return MaskOp;
  };

  ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
  if (!ShiftVal)

  ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);

  unsigned Opcode;
  switch (Op.getOpcode()) {
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
  }

  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
}
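// SkipImpliedMask works because wasm vector shifts already take the shift
// amount modulo the lane width, so an explicit "and amt, LaneBits-1" feeding
// the shift is a no-op and can be peeled off. A hedged scalar illustration
// (function names are illustrative):
#include <cstdint>

uint32_t wasmShl32(uint32_t Value, uint32_t Amount) {
  return Value << (Amount & 31); // the instruction masks implicitly
}

bool maskIsRedundant(uint32_t Value, uint32_t Amount) {
  // Pre-masking the amount never changes the result.
  return wasmShl32(Value, Amount & 31) == wasmShl32(Value, Amount);
}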
SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
  EVT ResT = Op.getValueType();

  if ((ResT == MVT::i32 || ResT == MVT::i64) &&
      (SatVT == MVT::i32 || SatVT == MVT::i64))
    return Op;

  if (ResT == MVT::v4i32 && SatVT == MVT::i32)
    return Op;

  if (ResT == MVT::v8i16 && SatVT == MVT::i16)
    return Op;

}
static SDValue
performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  SDValue Bitcast = N->getOperand(0);
  if (Bitcast.getOpcode() != ISD::BITCAST)
  if (!N->getOperand(1).isUndef())
  SDValue CastOp = Bitcast.getOperand(0);
  EVT DstType = Bitcast.getValueType();
  if (!SrcType.is128BitVector() ||
      SrcType.getVectorNumElements() != DstType.getVectorNumElements())
      SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
  return DAG.getBitcast(DstType, NewShuffle);
}
static SDValue
performVectorExtendToFPCombine(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  EVT InVT = N->getOperand(0)->getValueType(0);
  EVT ResVT = N->getValueType(0);
  MVT ExtVT;
  if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
    ExtVT = MVT::v4i32;
  else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
    ExtVT = MVT::v2i32;
  else

  unsigned Op =
  return DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, Conv);
}

static SDValue
performVectorNonNegToFPCombine(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  SDValue Op0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  }

}
static SDValue
performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  auto Extract = N->getOperand(0);
  auto Source = Extract.getOperand(0);
  if (IndexNode == nullptr)
  auto Index = IndexNode->getZExtValue();

  EVT ResVT = N->getValueType(0);
  if (ResVT == MVT::v8i16) {
        Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
  } else if (ResVT == MVT::v4i32) {
        Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
  } else if (ResVT == MVT::v2i64) {
        Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
  } else {
  }

  bool IsLow = Index == 0;

  unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
                                : WebAssemblyISD::EXTEND_HIGH_S)
                       : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
                                : WebAssemblyISD::EXTEND_HIGH_U);
}
static SDValue
performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  auto GetWasmConversionOp = [](unsigned Op) {
    switch (Op) {
      return WebAssemblyISD::TRUNC_SAT_ZERO_S;
      return WebAssemblyISD::TRUNC_SAT_ZERO_U;
      return WebAssemblyISD::DEMOTE_ZERO;
    }
  };

  auto IsZeroSplat = [](SDValue SplatVal) {
    APInt SplatValue, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;

           Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                                  HasAnyUndefs) &&
           SplatValue == 0;
  };

  EVT ResVT;
  EVT ExpectedConversionType;
  auto ConversionOp = Conversion.getOpcode();
  switch (ConversionOp) {
    ResVT = MVT::v4i32;
    ExpectedConversionType = MVT::v2i32;
    break;
    ResVT = MVT::v4f32;
    ExpectedConversionType = MVT::v2f32;
    break;
  default:
  }

  if (N->getValueType(0) != ResVT)

  if (Conversion.getValueType() != ExpectedConversionType)

  auto Source = Conversion.getOperand(0);
  if (Source.getValueType() != MVT::v2f64)

  if (!IsZeroSplat(N->getOperand(1)) ||
      N->getOperand(1).getValueType() != ExpectedConversionType)

  unsigned Op = GetWasmConversionOp(ConversionOp);
  }

  EVT ResVT;
  auto ConversionOp = N->getOpcode();
  switch (ConversionOp) {
    ResVT = MVT::v4i32;
    break;
    ResVT = MVT::v4f32;
    break;
  default:
  }

  if (N->getValueType(0) != ResVT)

  auto Concat = N->getOperand(0);
  if (Concat.getValueType() != MVT::v4f64)

  auto Source = Concat.getOperand(0);
  if (Source.getValueType() != MVT::v2f64)

  if (!IsZeroSplat(Concat.getOperand(1)) ||
      Concat.getOperand(1).getValueType() != MVT::v2f64)

  unsigned Op = GetWasmConversionOp(ConversionOp);
}
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
                                const SDLoc &DL, unsigned VectorWidth) {
  unsigned Factor = VT.getSizeInBits() / VectorWidth;

  unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits();
  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");

  IdxVal &= ~(ElemsPerChunk - 1);

                     Vec->ops().slice(IdxVal, ElemsPerChunk));

}
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL,
                                        SelectionDAG &DAG) {
  EVT SrcVT = In.getValueType();

  if (SrcVT == DstVT)
    return In;

  EVT InVT = MVT::i16, OutVT = MVT::i8;
    InVT = MVT::i32;
    OutVT = MVT::i16;
  }
  unsigned SubSizeInBits = SrcSizeInBits / 2;
  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());

  }

}
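// The narrow instructions used here (e.g. i8x16.narrow_i16x8) saturate rather
// than wrap, so lowering a plain trunc this way is only valid when the input
// has been masked (or is otherwise known) to fit the narrow lane. A hedged
// scalar model of one unsigned narrow step plus such a pre-mask (the
// narrowU16/truncViaNarrow names are illustrative):
#include <cstdint>

uint8_t narrowU16(int16_t X) {
  if (X < 0)
    return 0;     // saturating, unlike a wrapping truncation
  if (X > 0xFF)
    return 0xFF;
  return (uint8_t)X;
}

uint8_t truncViaNarrow(uint16_t X) {
  return narrowU16((int16_t)(X & 0xFF)); // masked input: narrow == trunc
}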
static SDValue performTruncateCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();

  EVT OutVT = N->getValueType(0);

  if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))

}
static SDValue performBitcastCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  SDValue Src = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  if (NumElts == 2 || NumElts == 4 || NumElts == 8 || NumElts == 16) {
        {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
         DAG.getSExtOrTrunc(N->getOperand(0), DL,
                            SrcVT.changeVectorElementType(Width))}),
        DL, VT);
  }

  if (NumElts == 32 || NumElts == 64) {

    for (size_t I = 0; I < Concat->ops().size(); I++) {
          MVT::i16,
          DAG, DL, 128),
          SetCond)));
    }

    MVT ReturnType = VectorsToShuffle.size() == 2 ? MVT::i32 : MVT::i64;

    for (SDValue V : VectorsToShuffle) {
      ReturningInteger = DAG.getNode(

      ReturningInteger =
          DAG.getNode(ISD::ADD, DL, ReturnType, {ReturningInteger, ExtendedV});
    }

    return ReturningInteger;
  }

}
static SDValue performAnyAllCombine(SDNode *N, SelectionDAG &DAG) {

  if (N->getNumOperands() < 2 ||
  EVT LT = LHS.getValueType();
  if (LT.getScalarSizeInBits() > 128 / LT.getVectorNumElements())

  auto CombineSetCC = [&N, &DAG](Intrinsic::WASMIntrinsics InPre,
                                 Intrinsic::WASMIntrinsics InPost) {
    if (N->getConstantOperandVal(0) != InPre)

        {DAG.getConstant(InPost, DL, MVT::i32), LHS}),
        DL, MVT::i1);
      Ret = DAG.getNOT(DL, Ret, MVT::i1);
  };

  if (SDValue AnyTrueEQ = CombineSetCC(Intrinsic::wasm_anytrue, ISD::SETEQ,
                                       Intrinsic::wasm_alltrue))
    return AnyTrueEQ;
  if (SDValue AllTrueEQ = CombineSetCC(Intrinsic::wasm_alltrue, ISD::SETEQ,
                                       Intrinsic::wasm_anytrue))
    return AllTrueEQ;
  if (SDValue AnyTrueNE = CombineSetCC(Intrinsic::wasm_anytrue, ISD::SETNE,
                                       Intrinsic::wasm_anytrue))
    return AnyTrueNE;
  if (SDValue AllTrueNE = CombineSetCC(Intrinsic::wasm_alltrue, ISD::SETNE,
                                       Intrinsic::wasm_alltrue))
    return AllTrueNE;

}
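// The rewrites above rest on a De Morgan-style duality over mask vectors:
// anytrue(v == 0) means "some lane is zero", i.e. !alltrue(v), and
// alltrue(v == 0) means "every lane is zero", i.e. !anytrue(v). A hedged
// scalar model over a 4-lane vector (function names are illustrative):
#include <array>

bool anyTrue(const std::array<int, 4> &V) {
  for (int L : V)
    if (L != 0)
      return true;
  return false;
}

bool allTrue(const std::array<int, 4> &V) {
  for (int L : V)
    if (L == 0)
      return false;
  return true;
}

bool anyLaneIsZero(const std::array<int, 4> &V) {
  for (int L : V)
    if (L == 0)
      return true; // anytrue(v == 0); always equals !allTrue(V)
  return false;
}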
template <int MatchRHS, ISD::CondCode MatchCond, bool RequiresNegate,
          Intrinsic::ID Intrin>
static SDValue TryMatchTrue(SDNode *N, EVT VecVT, SelectionDAG &DAG) {

          {DAG.getConstant(Intrin, DL, MVT::i32),
           DAG.getSExtOrTrunc(LHS->getOperand(0), DL, VecVT)}),
      DL, MVT::i1);
  if (RequiresNegate)
    Ret = DAG.getNOT(DL, Ret, MVT::i1);
}
  EVT VT = N->getValueType(0);
  EVT OpVT = X.getValueType();

                              Attribute::NoImplicitFloat))

      !Subtarget->hasSIMD128() || !isIntEqualitySetCC(CC))

  auto IsVectorBitCastCheap = [](SDValue X) {
  };

  if (!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y))

                          : Intrinsic::wasm_anytrue,
          DL, MVT::i32),
      Cmp});

}
  EVT VT = N->getValueType(0);

    return V;

  if (LHS->getOpcode() != ISD::BITCAST)

  EVT FromVT = LHS->getOperand(0).getValueType();

  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)

  auto &DAG = DCI.DAG;

          N, VecVT, DAG)) {
    return Match;
  }

          N, VecVT, DAG)) {
    return Match;
  }

          N, VecVT, DAG)) {
    return Match;
  }

          N, VecVT, DAG)) {
    return Match;
  }
}
static SDValue TryWideExtMulCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v8i32 && VT != MVT::v16i32)

  if (LHS.getOpcode() != RHS.getOpcode())

  if (LHS->getOperand(0).getValueType() != RHS->getOperand(0).getValueType())

  EVT FromVT = LHS->getOperand(0).getValueType();
  if (EltTy != MVT::i8)

  SDValue ExtendInLHS = LHS->getOperand(0);
  SDValue ExtendInRHS = RHS->getOperand(0);
  unsigned ExtendLowOpc =
      IsSigned ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U;
  unsigned ExtendHighOpc =
      IsSigned ? WebAssemblyISD::EXTEND_HIGH_S : WebAssemblyISD::EXTEND_HIGH_U;

  auto GetExtendLow = [&DAG, &DL, &ExtendLowOpc](EVT VT, SDValue Op) {
    return DAG.getNode(ExtendLowOpc, DL, VT, Op);
  };
    return DAG.getNode(ExtendHighOpc, DL, VT, Op);
  };

  if (NumElts == 16) {
    SDValue LowLHS = GetExtendLow(MVT::v8i16, ExtendInLHS);
    SDValue LowRHS = GetExtendLow(MVT::v8i16, ExtendInRHS);
    SDValue SubVectors[] = {
        GetExtendLow(MVT::v4i32, MulLow),
        GetExtendLow(MVT::v4i32, MulHigh),
    };
  } else {
    assert(NumElts == 8);
    SDValue LowLHS = DAG.getNode(LHS->getOpcode(), DL, MVT::v8i16, ExtendInLHS);
    SDValue LowRHS = DAG.getNode(RHS->getOpcode(), DL, MVT::v8i16, ExtendInRHS);
    SDValue Lo = GetExtendLow(MVT::v4i32, MulLow);
  }
}
  EVT VT = N->getValueType(0);

    return Res;

  if (!DCI.isBeforeLegalize() || (VT != MVT::v8i8 && VT != MVT::v16i8))

  EVT MulVT = MVT::v8i16;

  if (VT == MVT::v8i8) {
        DAG.getNode(WebAssemblyISD::EXTEND_LOW_U, DL, MulVT, PromotedLHS);
        DAG.getNode(WebAssemblyISD::EXTEND_LOW_U, DL, MulVT, PromotedRHS);

        MVT::v16i8, DL, MulLow, DAG.getUNDEF(MVT::v16i8),
        {0, 2, 4, 6, 8, 10, 12, 14, -1, -1, -1, -1, -1, -1, -1, -1});
  } else {
    assert(VT == MVT::v16i8 && "Expected v16i8");
    SDValue LowLHS = DAG.getNode(WebAssemblyISD::EXTEND_LOW_U, DL, MulVT, LHS);
    SDValue LowRHS = DAG.getNode(WebAssemblyISD::EXTEND_LOW_U, DL, MulVT, RHS);
        DAG.getNode(WebAssemblyISD::EXTEND_HIGH_U, DL, MulVT, LHS);
        DAG.getNode(WebAssemblyISD::EXTEND_HIGH_U, DL, MulVT, RHS);

        VT, DL, MulLow, MulHigh,
        {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
  }
}
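// wasm SIMD has no i8x16.mul, so byte multiplies are widened to i16 lanes,
// multiplied there, and the low bytes of the products are picked back out by
// the even-index shuffle masks above (little-endian lane order). A hedged
// scalar model of one lane (mulI8ViaI16 is an illustrative name):
#include <cstdint>

uint8_t mulI8ViaI16(uint8_t A, uint8_t B) {
  uint16_t Wide = (uint16_t)A * (uint16_t)B; // extend_low/high + i16x8.mul
  return (uint8_t)Wide;                      // low byte == wrapped i8 product
}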
SDValue DoubleVectorWidth(SDValue In, unsigned RequiredNumElems,
                          SelectionDAG &DAG) {
  EVT InVT = In.getValueType();
  if (NumElems < RequiredNumElems) {
  }
}

SDValue performConvertFPCombine(SDNode *N, SelectionDAG &DAG) {
  EVT OutVT = N->getValueType(0);

  if (OutElTy != MVT::i8 && OutElTy != MVT::i16)

  EVT FPVT = N->getOperand(0)->getValueType(0);

  SDValue ToInt = DAG.getNode(N->getOpcode(), DL, IntVT, N->getOperand(0));

  EVT NarrowedVT = OutElTy == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
  } else {
  }
}
SDValue
WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
  case ISD::BITCAST:
    return ExtCombine;
  }
}
unsigned const MachineRegisterInfo * MRI
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static SDValue performTruncateCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Function Alias Analysis Results
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
const HexagonInstrInfo * TII
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
static SDValue performVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Try to map an integer comparison with size > XLEN to vector instructions before type legalization spl...
const SmallVectorImpl< MachineOperand > & Cond
Contains matchers for matching SelectionDAG nodes and values.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")