LLVM: lib/Target/Hexagon/HexagonISelLowering.cpp Source File (original) (raw)
1
2
3
4
5
6
7
8
9
10
11
12
13
46#include "llvm/IR/IntrinsicsHexagon.h"
58#include
59#include
60#include
61#include
62#include
63
64using namespace llvm;
65
66#define DEBUG_TYPE "hexagon-lowering"
67
70 cl::desc("Control jump table emission on Hexagon target"));
71
74 cl::desc("Enable Hexagon SDNode scheduling"));
75
77 cl::desc("Enable Fast Math processing"));
78
81 cl::desc("Set minimum jump tables"));
82
85 cl::desc("Max #stores to inline memcpy"));
86
89 cl::desc("Max #stores to inline memcpy"));
90
93 cl::desc("Max #stores to inline memmove"));
94
98 cl::desc("Max #stores to inline memmove"));
99
102 cl::desc("Max #stores to inline memset"));
103
106 cl::desc("Max #stores to inline memset"));
107
110 cl::desc("Rewrite unaligned loads as a pair of aligned loads"));
111
115 cl::desc("Disable minimum alignment of 1 for "
116 "arguments passed by value on stack"));
117
118namespace {
119
120 class HexagonCCState : public CCState {
121 unsigned NumNamedVarArgParams = 0;
122
123 public:
126 unsigned NumNamedArgs)
127 : CCState(CC, IsVarArg, MF, locs, C),
128 NumNamedVarArgParams(NumNamedArgs) {}
129 unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
130 };
131
132}
133
134
135
136
140 static const MCPhysReg ArgRegs[] = {
141 Hexagon::R0, Hexagon::R1, Hexagon::R2,
142 Hexagon::R3, Hexagon::R4, Hexagon::R5
143 };
144 const unsigned NumArgRegs = std::size(ArgRegs);
146
147
148 if (RegNum != NumArgRegs && RegNum % 2 == 1)
150
151
152
153
154 return false;
155}
156
157#include "HexagonGenCallingConv.inc"
158
159
162 const {
164}
165
166
167
168
169
170
174 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
176 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
177 false, false,
179}
180
181bool
187 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
188
190 return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
191 return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
192}
193
194
195
196
199 bool IsVarArg,
203
205
206
209
210
213 else
215
218
219
220 for (unsigned i = 0; i != RVLocs.size(); ++i) {
222 SDValue Val = OutVals[i];
223
225 default:
226
229 break;
232 break;
235 break;
238 break;
241 break;
242 }
243
245
246
249 }
250
251 RetOps[0] = Chain;
252
253
256
258}
259
261
263}
264
267
269 .Case("r0", Hexagon::R0)
270 .Case("r1", Hexagon::R1)
271 .Case("r2", Hexagon::R2)
272 .Case("r3", Hexagon::R3)
273 .Case("r4", Hexagon::R4)
274 .Case("r5", Hexagon::R5)
275 .Case("r6", Hexagon::R6)
276 .Case("r7", Hexagon::R7)
277 .Case("r8", Hexagon::R8)
278 .Case("r9", Hexagon::R9)
279 .Case("r10", Hexagon::R10)
280 .Case("r11", Hexagon::R11)
281 .Case("r12", Hexagon::R12)
282 .Case("r13", Hexagon::R13)
283 .Case("r14", Hexagon::R14)
284 .Case("r15", Hexagon::R15)
285 .Case("r16", Hexagon::R16)
286 .Case("r17", Hexagon::R17)
287 .Case("r18", Hexagon::R18)
288 .Case("r19", Hexagon::R19)
289 .Case("r20", Hexagon::R20)
290 .Case("r21", Hexagon::R21)
291 .Case("r22", Hexagon::R22)
292 .Case("r23", Hexagon::R23)
293 .Case("r24", Hexagon::R24)
294 .Case("r25", Hexagon::R25)
295 .Case("r26", Hexagon::R26)
296 .Case("r27", Hexagon::R27)
297 .Case("r28", Hexagon::R28)
298 .Case("r29", Hexagon::R29)
299 .Case("r30", Hexagon::R30)
300 .Case("r31", Hexagon::R31)
301 .Case("r1:0", Hexagon::D0)
302 .Case("r3:2", Hexagon::D1)
303 .Case("r5:4", Hexagon::D2)
304 .Case("r7:6", Hexagon::D3)
305 .Case("r9:8", Hexagon::D4)
306 .Case("r11:10", Hexagon::D5)
307 .Case("r13:12", Hexagon::D6)
308 .Case("r15:14", Hexagon::D7)
309 .Case("r17:16", Hexagon::D8)
310 .Case("r19:18", Hexagon::D9)
311 .Case("r21:20", Hexagon::D10)
312 .Case("r23:22", Hexagon::D11)
313 .Case("r25:24", Hexagon::D12)
314 .Case("r27:26", Hexagon::D13)
315 .Case("r29:28", Hexagon::D14)
316 .Case("r31:30", Hexagon::D15)
317 .Case("sp", Hexagon::R29)
318 .Case("fp", Hexagon::R30)
319 .Case("lr", Hexagon::R31)
320 .Case("p0", Hexagon::P0)
321 .Case("p1", Hexagon::P1)
322 .Case("p2", Hexagon::P2)
323 .Case("p3", Hexagon::P3)
324 .Case("sa0", Hexagon::SA0)
325 .Case("lc0", Hexagon::LC0)
326 .Case("sa1", Hexagon::SA1)
327 .Case("lc1", Hexagon::LC1)
328 .Case("m0", Hexagon::M0)
329 .Case("m1", Hexagon::M1)
330 .Case("usr", Hexagon::USR)
331 .Case("ugp", Hexagon::UGP)
332 .Case("cs0", Hexagon::CS0)
333 .Case("cs1", Hexagon::CS1)
335 if (Reg)
336 return Reg;
337
339}
340
341
342
343
344
345
351
353
356
359 else
361
362
363 for (unsigned i = 0; i != RVLocs.size(); ++i) {
365 if (RVLocs[i].getValVT() == MVT::i1) {
366
367
368
369
370
373 MVT::i32, Glue);
374
375 Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
378
379
380
381
385 } else {
386 RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
387 RVLocs[i].getValVT(), Glue);
390 }
392 }
393
394 return Chain;
395}
396
397
398
410 bool IsVarArg = CLI.IsVarArg;
412
413 bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
417
421
422
424
425
427 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),
428 NumParams);
429
431 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
433 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
434 else
435 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
436
440 IsVarArg, IsStructRet, StructAttrFlag, Outs,
441 OutVals, Ins, DAG);
443 if (VA.isMemLoc()) {
445 break;
446 }
447 }
449 : "Argument must be passed on stack. "
450 "Not eligible for Tail Call\n"));
451 }
452
453 unsigned NumBytes = CCInfo.getStackSize();
456
460
461 bool NeedsArgAlign = false;
462 Align LargestAlignSeen;
463
464 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
466 SDValue Arg = OutVals[i];
468
470 NeedsArgAlign |= ArgAlign;
471
472
474 default:
475
478 break;
481 break;
484 break;
487 break;
490 break;
491 }
492
496 StackPtr.getValueType());
497 MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
498 if (ArgAlign)
499 LargestAlignSeen = std::max(
501 if (Flags.isByVal()) {
502
503
505 Flags, DAG, dl));
506 } else {
509 SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
511 }
512 continue;
513 }
514
515
516
519 }
520
521 if (NeedsArgAlign && Subtarget.hasV60Ops()) {
522 LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
523 Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
524 LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
526 }
527
528
529 if (!MemOpChains.empty())
531
536 }
537
538
539
540
541
543 for (const auto &R : RegsToPass) {
544 Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
546 }
547 } else {
548
549
550
551
552
553
554
555
556
557
559 for (const auto &R : RegsToPass) {
560 Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
562 }
564 }
565
568
569
570
571
575 dyn_cast(Callee)) {
577 }
578
579
583
584
585
586 for (const auto &R : RegsToPass)
588
590 assert(Mask && "Missing call preserved mask for calling convention");
592
595
599 }
600
601
602
603
605
607 Chain = DAG.getNode(OpCode, dl, {MVT::Other, MVT::Glue}, Ops);
609
610
611 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, dl);
613
614
615
616 return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
617 InVals, OutVals, Callee);
618}
619
620
621
622
627 if (!LSN)
628 return false;
631 return false;
632 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
633 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
634 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
635 VT == MVT::v4i16 || VT == MVT::v8i8 ||
637 if (!IsLegalType)
638 return false;
639
641 return false;
644 if (!isa(Offset.getNode()))
645 return false;
647
648 int32_t V = cast(Offset.getNode())->getSExtValue();
650}
651
655 else
656 return Op;
657}
658
664 unsigned LR = HRI.getRARegister();
665
668 return Op;
669
671 if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
672 --NumOps;
673
676 unsigned NumVals = Flags.getNumOperandRegisters();
677 ++i;
678
679 switch (Flags.getKind()) {
680 default:
685 i += NumVals;
686 break;
690 for (; NumVals; --NumVals, ++i) {
691 Register Reg = cast(Op.getOperand(i))->getReg();
692 if (Reg != LR)
693 continue;
694 HMFI.setHasClobberLR(true);
695 return Op;
696 }
697 break;
698 }
699 }
700 }
701
702 return Op;
703}
704
705
706
707
710 SDValue Chain = Op.getOperand(0);
712
713
717}
718
719
720
721
722
725 SDValue Chain = Op.getOperand(0);
729}
730
731
732
733
734
737 SDValue Chain = Op.getOperand(0);
741}
742
745 SDValue Chain = Op.getOperand(0);
746 unsigned IntNo = Op.getConstantOperandVal(1);
747
748 if (IntNo == Intrinsic::hexagon_prefetch) {
753 }
755}
756
760 SDValue Chain = Op.getOperand(0);
764
766 assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");
767
770
771 if (A == 0)
772 A = HFI.getStackAlign().value();
773
775 dbgs () << __func__ << " Align: " << A << " Size: ";
776 Size.getNode()->dump(&DAG);
777 dbgs() << "\n";
778 });
779
783
785 return AA;
786}
787
795
796
798
799
801 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
804
806 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
808 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
809 else
810 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
811
812
813
814
815
816
817
819 switch (RC.getID()) {
820 case Hexagon::IntRegsRegClassID:
821 return Reg - Hexagon::R0 + 1;
822 case Hexagon::DoubleRegsRegClassID:
823 return (Reg - Hexagon::D0 + 1) * 2;
824 case Hexagon::HvxVRRegClassID:
825 return Reg - Hexagon::V0 + 1;
826 case Hexagon::HvxWRRegClassID:
827 return (Reg - Hexagon::W0 + 1) * 2;
828 }
830 };
831
834 HFL.FirstVarArgSavedReg = 0;
836
837 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
840 bool ByVal = Flags.isByVal();
841
842
843
844
845
846 if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
848
849 bool InReg = VA.isRegLoc() &&
850 (!ByVal || (ByVal && Flags.getByValSize() > 8));
851
852 if (InReg) {
856
858 Register VReg = MRI.createVirtualRegister(RC);
860
861
862
863
864 if (VA.getValVT() == MVT::i1) {
870 } else {
871#ifndef NDEBUG
875#endif
876 }
879 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
880 } else {
881 assert(VA.isMemLoc() && "Argument should be passed in memory");
882
883
884
885 unsigned ObjSize = Flags.isByVal()
886 ? Flags.getByValSize()
888
889
893
894 if (Flags.isByVal()) {
895
896
897
899 } else {
903 }
904 }
905 }
906
908 for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
909 MRI.addLiveIn(Hexagon::R0+i);
910 }
911
913 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
915
916
917 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
918 bool RequiresPadding = (NumVarArgRegs & 1);
919 int RegSaveAreaSizePlusPadding = RequiresPadding
920 ? (NumVarArgRegs + 1) * 4
921 : NumVarArgRegs * 4;
922
923 if (RegSaveAreaSizePlusPadding > 0) {
924
926 if (!(RegAreaStart % 8))
927 RegAreaStart = (RegAreaStart + 7) & -8;
928
929 int RegSaveAreaFrameIndex =
930 MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);
931 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);
932
933
934 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
936 HMFI.setVarArgsFrameIndex(FI);
937 } else {
938
939
942 HMFI.setRegSavedAreaStartFrameIndex(FI);
943 HMFI.setVarArgsFrameIndex(FI);
944 }
945 }
946
947
949
952 HMFI.setVarArgsFrameIndex(FI);
953 }
954
955 return Chain;
956}
957
960
961
965 const Value *SV = cast(Op.getOperand(2))->getValue();
966
970 }
975
976
978
979
980
981
982
983
984
985 SDValue SavedRegAreaStartFrameIndex =
986 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);
987
989
990 if (HFL.FirstVarArgSavedReg & 1)
991 SavedRegAreaStartFrameIndex =
993 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
994 MVT::i32),
996
997
1000 SavedRegAreaStartFrameIndex,
1003
1004
1008 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
1009 PtrVT),
1012
1013
1017 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
1018 PtrVT),
1021
1023}
1024
1027
1029 SDValue Chain = Op.getOperand(0);
1030 SDValue DestPtr = Op.getOperand(1);
1031 SDValue SrcPtr = Op.getOperand(2);
1032 const Value *DestSV = cast(Op.getOperand(3))->getValue();
1033 const Value *SrcSV = cast(Op.getOperand(4))->getValue();
1035
1036
1039 false, false, nullptr, std::nullopt,
1041}
1042
1047 ISD::CondCode CC = cast(Op.getOperand(2))->get();
1050
1051 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1056 return DAG.getSetCC(dl, ResTy,
1059 }
1060
1061
1063 return Op;
1064
1065
1066
1067
1068 auto isSExtFree = [this](SDValue N) {
1069 switch (N.getOpcode()) {
1071
1074 return false;
1075 EVT OrigTy = cast(Op.getOperand(1))->getVT();
1078
1079
1080
1081 return ThisBW >= OrigBW;
1082 }
1084
1085 return true;
1086 }
1087 return false;
1088 };
1089
1090 if (OpTy == MVT::i8 || OpTy == MVT::i16) {
1092 bool IsNegative = C && C->getAPIntValue().isNegative();
1093 if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
1094 return DAG.getSetCC(dl, ResTy,
1097 }
1098
1100}
1101
1104 SDValue PredOp = Op.getOperand(0);
1105 SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
1106 MVT OpTy = ty(Op1);
1108
1109 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1114
1116 DAG.getSelect(dl, WideTy, PredOp,
1119 dl, OpTy);
1120 }
1121
1123}
1124
1127 EVT ValTy = Op.getValueType();
1130 bool isVTi1Type = false;
1131 if (auto *CV = dyn_cast(CPN->getConstVal())) {
1132 if (cast(CV->getType())->getElementType()->isIntegerTy(1)) {
1135 unsigned VecLen = CV->getNumOperands();
1137 "conversion only supported for pow2 VectorSize");
1138 for (unsigned i = 0; i < VecLen; ++i)
1139 NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue()));
1140
1142 isVTi1Type = true;
1143 }
1144 }
1148
1149 unsigned Offset = 0;
1154 else if (isVTi1Type)
1156 else
1158 TF);
1159
1160 assert(cast(T)->getTargetFlags() == TF &&
1161 "Inconsistent target flag encountered");
1162
1163 if (IsPositionIndependent)
1166}
1167
1170 EVT VT = Op.getValueType();
1171 int Idx = cast(Op)->getIndex();
1175 }
1176
1179}
1180
1187
1190
1191 EVT VT = Op.getValueType();
1193 unsigned Depth = Op.getConstantOperandVal(0);
1200 }
1201
1202
1205}
1206
1212
1213 EVT VT = Op.getValueType();
1215 unsigned Depth = Op.getConstantOperandVal(0);
1221 return FrameAddr;
1222}
1223
1228}
1229
1233 auto *GAN = cast(Op);
1235 auto *GV = GAN->getGlobal();
1236 int64_t Offset = GAN->getOffset();
1237
1240
1244 if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
1247 }
1248
1250 if (UsePCRel) {
1254 }
1255
1256
1261}
1262
1263
1266 const BlockAddress *BA = cast(Op)->getBlockAddress();
1269
1274 }
1275
1278}
1279
1282 const {
1287}
1288
1292 unsigned char OperandFlags) const {
1300 OperandFlags);
1301
1302
1303
1304
1305
1306
1307
1308
1311 assert(Mask && "Missing call preserved mask for calling convention");
1315
1316
1318
1320 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
1321}
1322
1323
1324
1325
1332
1333
1335
1337 unsigned char TF =
1339
1340
1343
1345
1346 if (IsPositionIndependent) {
1347
1349
1350
1351
1353 }
1354
1355
1356
1359
1360
1361
1362 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
1363}
1364
1365
1366
1367
1374
1375
1377
1381
1382
1383
1385}
1386
1387
1388
1389
1396
1397
1400
1401
1403
1404
1407
1408
1412
1416
1418 Hexagon::R0, Flags);
1419}
1420
1421
1422
1423
1424
1425
1430
1439 }
1441}
1442
1443
1444
1445
1446
1450 Subtarget(ST) {
1452
1459
1462
1465 else
1467
1468
1475
1476
1477
1478
1479
1481 addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass);
1482 addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass);
1483 addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass);
1491
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1525
1526
1530
1531
1536
1537
1543 else
1545
1549
1552 else
1555
1556 for (unsigned LegalIntOp :
1560 }
1561
1562
1563
1571 }
1574
1579
1580
1585
1590
1595
1596 for (unsigned IntExpOp :
1603 }
1604
1605 for (unsigned FPExpOp :
1610 }
1611
1612
1617 }
1618
1620
1623
1624
1628 }
1632 }
1634
1635
1636
1637
1638
1639
1640
1641 static const unsigned VectExpOps[] = {
1642
1646
1649
1656
1658
1664 };
1665
1667 for (unsigned VectExpOp : VectExpOps)
1669
1670
1672 if (TargetVT == VT)
1673 continue;
1678 }
1679
1680
1681 if (VT.getVectorElementType() != MVT::i32) {
1685 }
1689 }
1690
1691
1692
1699
1703
1704
1705 for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
1706 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1713
1720
1721 if (NativeVT.getVectorElementType() != MVT::i1) {
1725 }
1726 }
1727
1728 for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {
1733 }
1734
1735
1736
1737
1738
1739 for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
1740 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1743 }
1744
1745
1746 for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1749 }
1750
1751
1752 for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16,
1753 MVT::v2i32}) {
1761 }
1762
1763
1764 for (MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1769 }
1770
1771
1779
1780
1786
1789
1802
1803
1804
1809
1814
1815
1816
1817 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
1818 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
1821 }
1822
1823
1824
1830 }
1834 }
1839 }
1840
1844
1846 initializeHVXLowering();
1847
1849
1850
1851
1852
1854
1855 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1856 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1857 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1858 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1859 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1860 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1861 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1862 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1863
1864
1865 if (FastMath)
1866 setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
1867
1868
1869
1870
1871 if (FastMath) {
1872 setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
1873 setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
1874 setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
1875 setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
1876 setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
1877 } else {
1883 }
1884
1885 if (FastMath)
1886 setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
1887 else
1889
1890
1891 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1892 setLibcallName(RTLIB::FPROUND_F64_F16, "__truncdfhf2");
1893 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1894}
1895
1950 }
1951 return nullptr;
1952}
1953
1954bool
1955HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign,
1957 auto *CA = dyn_cast(Ptr);
1958 if (!CA)
1959 return true;
1960 unsigned Addr = CA->getZExtValue();
1961 Align HaveAlign =
1963 if (HaveAlign >= NeedAlign)
1964 return true;
1965
1967
1968 struct DiagnosticInfoMisalignedTrap : public DiagnosticInfo {
1969 DiagnosticInfoMisalignedTrap(StringRef M)
1972 DP << Msg;
1973 }
1975 return DI->getKind() == DK_MisalignedTrap;
1976 }
1978 };
1979
1980 std::string ErrMsg;
1982 O << "Misaligned constant address: " << format_hex(Addr, 10)
1983 << " has alignment " << HaveAlign.value()
1984 << ", but the memory access requires " << NeedAlign.value();
1987 O << ". The instruction has been replaced with a trap.";
1988
1990 return false;
1991}
1992
1995 const {
1997 auto *LS = cast(Op.getNode());
1998 assert(->isIndexed() && "Not expecting indexed ops on constant address");
1999
2004 return Trap;
2005}
2006
2007
2008
2010 unsigned ID = cast(Inst)->getIntrinsicID();
2011 return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
2012 ID == Intrinsic::hexagon_L2_loadri_pbr ||
2013 ID == Intrinsic::hexagon_L2_loadrh_pbr ||
2014 ID == Intrinsic::hexagon_L2_loadruh_pbr ||
2015 ID == Intrinsic::hexagon_L2_loadrb_pbr ||
2016 ID == Intrinsic::hexagon_L2_loadrub_pbr);
2017}
2018
2019
2020
2021
2025 V = cast(V)->getOperand(0);
2027 V = cast(V)->getOperand(0);
2028 return V;
2029}
2030
2031
2032
2033
2036 int Idx = -1;
2039
2040 if (Blk == Parent) {
2043
2044 do {
2045 BaseVal = BackEdgeVal;
2047 } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
2048
2049
2050 if (IntrBaseVal == BackEdgeVal)
2051 continue;
2052 Idx = i;
2053 break;
2054 } else
2055 Idx = i;
2056 }
2057 assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
2059}
2060
2061
2062
2063
2065 Value *IntrBaseVal = V;
2067
2068
2069 do {
2070 BaseVal = V;
2072 } while (BaseVal != V);
2073
2074
2075 if (const PHINode *PN = dyn_cast(V))
2077
2078 else
2079 return V;
2080}
2081
2082
2083
2084
2085
2089 unsigned Intrinsic) const {
2090 switch (Intrinsic) {
2091 case Intrinsic::hexagon_L2_loadrd_pbr:
2092 case Intrinsic::hexagon_L2_loadri_pbr:
2093 case Intrinsic::hexagon_L2_loadrh_pbr:
2094 case Intrinsic::hexagon_L2_loadruh_pbr:
2095 case Intrinsic::hexagon_L2_loadrb_pbr:
2096 case Intrinsic::hexagon_L2_loadrub_pbr: {
2098 auto &DL = I.getDataLayout();
2099 auto &Cont = I.getCalledFunction()->getParent()->getContext();
2100
2101
2102
2103 Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0);
2107
2108
2109 Info.offset = 0;
2110 Info.align = DL.getABITypeAlign(Info.memVT.getTypeForEVT(Cont));
2112 return true;
2113 }
2114 case Intrinsic::hexagon_V6_vgathermw:
2115 case Intrinsic::hexagon_V6_vgathermw_128B:
2116 case Intrinsic::hexagon_V6_vgathermh:
2117 case Intrinsic::hexagon_V6_vgathermh_128B:
2118 case Intrinsic::hexagon_V6_vgathermhw:
2119 case Intrinsic::hexagon_V6_vgathermhw_128B:
2120 case Intrinsic::hexagon_V6_vgathermwq:
2121 case Intrinsic::hexagon_V6_vgathermwq_128B:
2122 case Intrinsic::hexagon_V6_vgathermhq:
2123 case Intrinsic::hexagon_V6_vgathermhq_128B:
2124 case Intrinsic::hexagon_V6_vgathermhwq:
2125 case Intrinsic::hexagon_V6_vgathermhwq_128B: {
2126 const Module &M = *I.getParent()->getParent()->getParent();
2128 Type *VecTy = I.getArgOperand(1)->getType();
2130 Info.ptrVal = I.getArgOperand(0);
2131 Info.offset = 0;
2132 Info.align =
2133 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
2137 return true;
2138 }
2139 default:
2140 break;
2141 }
2142 return false;
2143}
2144
2146 return X.getValueType().isScalarInteger();
2147}
2148
2151}
2152
2155 return false;
2157}
2158
2162}
2163
2164
2166 unsigned DefinedValues) const {
2167 return false;
2168}
2169
2171 unsigned Index) const {
2174 return false;
2175
2178 return true;
2179
2180
2182}
2183
2187}
2188
2190 EVT VT) const {
2191 return true;
2192}
2193
2198
2201
2203 unsigned Action = getPreferredHvxVectorAction(VT);
2204 if (Action != ~0u)
2206 }
2207
2208
2209 if (ElemTy == MVT::i1)
2211
2212
2213
2216
2218}
2219
2223 unsigned Action = getCustomHvxOperationAction(Op);
2224 if (Action != ~0u)
2226 }
2228}
2229
2230std::pair<SDValue, int>
2231HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
2234 if (auto *CN = dyn_cast(Op1.getNode()))
2235 return { Addr.getOperand(0), CN->getSExtValue() };
2236 }
2237 return { Addr, 0 };
2238}
2239
2240
2241
2244 const {
2245 const auto *SVN = cast(Op);
2247 assert(AM.size() <= 8 && "Unexpected shuffle mask");
2248 unsigned VecLen = AM.size();
2249
2252 "HVX shuffles should be legal");
2254
2258
2259
2260
2261
2262 if (ty(Op0) != VecTy || ty(Op1) != VecTy)
2264
2265
2266
2268 unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data();
2271 if (AM[F] >= int(VecLen)) {
2274 }
2275
2276
2279 for (int M : Mask) {
2280 if (M < 0) {
2281 for (unsigned j = 0; j != ElemBytes; ++j)
2283 } else {
2284 for (unsigned j = 0; j != ElemBytes; ++j)
2285 ByteMask.push_back(M*ElemBytes + j);
2286 }
2287 }
2289
2290
2291
2292
2293
2294
2297 for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {
2298 unsigned S = 8*i;
2299 uint64_t M = ByteMask[i] & 0xFF;
2300 if (M == 0xFF)
2301 MaskUnd |= M << S;
2302 MaskIdx |= M << S;
2303 }
2304
2305 if (ByteMask.size() == 4) {
2306
2307 if (MaskIdx == (0x03020100 | MaskUnd))
2308 return Op0;
2309
2310 if (MaskIdx == (0x00010203 | MaskUnd)) {
2314 }
2315
2316
2318 getCombine(Op1, Op0, dl, typeJoin({ty(Op1), ty(Op0)}), DAG);
2319 if (MaskIdx == (0x06040200 | MaskUnd))
2320 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
2321 if (MaskIdx == (0x07050301 | MaskUnd))
2322 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);
2323
2325 getCombine(Op0, Op1, dl, typeJoin({ty(Op0), ty(Op1)}), DAG);
2326 if (MaskIdx == (0x02000604 | MaskUnd))
2327 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
2328 if (MaskIdx == (0x03010705 | MaskUnd))
2329 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
2330 }
2331
2332 if (ByteMask.size() == 8) {
2333
2334 if (MaskIdx == (0x0706050403020100ull | MaskUnd))
2335 return Op0;
2336
2337 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
2341 }
2342
2343
2344 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
2345 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
2346 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
2347 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
2348 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
2349 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
2350 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
2351 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
2352 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
2353 VectorPair P = opSplit(Op0, dl, DAG);
2354 return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);
2355 }
2356
2357
2358 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
2359 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
2360 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
2361 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
2362 }
2363
2365}
2366
2369 switch (Op.getOpcode()) {
2371 if (SDValue S = cast(Op)->getSplatValue())
2372 return S;
2373 break;
2375 return Op.getOperand(0);
2376 }
2378}
2379
2380
2383 const {
2384 unsigned NewOpc;
2385 switch (Op.getOpcode()) {
2388 break;
2391 break;
2394 break;
2395 default:
2397 }
2398
2399 if (SDValue Sp = getSplatValue(Op.getOperand(1), DAG))
2400 return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), Sp);
2402}
2403
2407
2408
2409
2410
2412 if (SDValue S = getVectorShiftByInt(Op, DAG))
2413 Res = S;
2414
2416 switch (Opc) {
2420 break;
2421 default:
2422
2424 }
2425
2426 MVT ResTy = ty(Res);
2428 return Res;
2429
2430
2433
2434 auto ShiftPartI8 = [&dl, &DAG, this](unsigned Opc, SDValue V, SDValue A) {
2435 MVT Ty = ty(V);
2441 };
2442
2444 return ShiftPartI8(Opc, Val, Amt);
2445
2446 auto [LoV, HiV] = opSplit(Val, dl, DAG);
2448 {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)});
2449}
2450
2453 if (isa(Op.getOperand(1).getNode()))
2454 return Op;
2456}
2457
2461 SDValue InpV = Op.getOperand(0);
2462 MVT InpTy = ty(InpV);
2465
2466
2467 if (InpTy == MVT::i8) {
2468 if (ResTy == MVT::v8i1) {
2471 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
2472 }
2474 }
2475
2476 return Op;
2477}
2478
2479bool
2480HexagonTargetLowering::getBuildVectorConstInts(ArrayRef Values,
2486 bool AllConst = true;
2487
2488 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2490 if (V.isUndef()) {
2491 Consts[i] = ConstantInt::get(IntTy, 0);
2492 continue;
2493 }
2494
2495 if (auto *CN = dyn_cast(V.getNode())) {
2496 const ConstantInt *CI = CN->getConstantIntValue();
2498 } else if (auto *CN = dyn_cast(V.getNode())) {
2499 const ConstantFP *CF = CN->getConstantFPValue();
2501 Consts[i] = ConstantInt::get(IntTy, A.getZExtValue());
2502 } else {
2503 AllConst = false;
2504 }
2505 }
2506 return AllConst;
2507}
2508
2514
2516 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2517
2518 unsigned First, Num = Elem.size();
2520 if (!isUndef(Elem[First]))
2521 break;
2522 }
2523 if (First == Num)
2525
2526 if (AllConst &&
2528 return getZero(dl, VecTy, DAG);
2529
2530 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2532 if (AllConst) {
2533
2534
2535 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2536 Consts[1]->getZExtValue() << 16;
2538 }
2540 if (ElemTy == MVT::f16) {
2543 } else {
2544 E0 = Elem[0];
2545 E1 = Elem[1];
2546 }
2547 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {E1, E0}, DAG);
2549 }
2550
2551 if (ElemTy == MVT::i8) {
2552
2553 if (AllConst) {
2554 uint32_t V = (Consts[0]->getZExtValue() & 0xFF) |
2555 (Consts[1]->getZExtValue() & 0xFF) << 8 |
2556 (Consts[2]->getZExtValue() & 0xFF) << 16 |
2557 Consts[3]->getZExtValue() << 24;
2559 }
2560
2561
2562 bool IsSplat = true;
2563 for (unsigned i = First+1; i != Num; ++i) {
2564 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2565 continue;
2566 IsSplat = false;
2567 break;
2568 }
2569 if (IsSplat) {
2570
2573 }
2574
2575
2576
2577
2580 for (unsigned i = 0; i != 4; ++i) {
2583 }
2589
2590 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
2591 return DAG.getBitcast(MVT::v4i8, R);
2592 }
2593
2594#ifndef NDEBUG
2595 dbgs() << "VecTy: " << VecTy << '\n';
2596#endif
2598}
2599
2605
2607 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2608
2609 unsigned First, Num = Elem.size();
2611 if (!isUndef(Elem[First]))
2612 break;
2613 }
2614 if (First == Num)
2616
2617 if (AllConst &&
2619 return getZero(dl, VecTy, DAG);
2620
2621
2622 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2623 bool IsSplat = true;
2624 for (unsigned i = First+1; i != Num; ++i) {
2625 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2626 continue;
2627 IsSplat = false;
2628 break;
2629 }
2630 if (IsSplat) {
2631
2636 }
2637 }
2638
2639
2640 if (AllConst) {
2644 for (unsigned i = 0; i != Num; ++i)
2645 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);
2648 }
2649
2650
2652 SDValue L = (ElemTy == MVT::i32)
2653 ? Elem[0]
2654 : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);
2655 SDValue H = (ElemTy == MVT::i32)
2656 ? Elem[1]
2657 : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);
2658 return getCombine(H, L, dl, VecTy, DAG);
2659}
2660
2662HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
2665 MVT VecTy = ty(VecV);
2669 return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG);
2670
2674 assert((VecWidth % ElemWidth) == 0);
2675 assert(VecWidth == 32 || VecWidth == 64);
2676
2677
2678 MVT ScalarTy = tyScalar(VecTy);
2679 VecV = DAG.getBitcast(ScalarTy, VecV);
2680
2683
2684 if (auto *IdxN = dyn_cast(IdxV)) {
2685 unsigned Off = IdxN->getZExtValue() * ElemWidth;
2686 if (VecWidth == 64 && ValWidth == 32) {
2687 assert(Off == 0 || Off == 32);
2688 ExtV = Off == 0 ? LoHalf(VecV, DAG) : HiHalf(VecV, DAG);
2689 } else if (Off == 0 && (ValWidth % 8) == 0) {
2691 } else {
2693
2694
2696 {VecV, WidthV, OffV});
2697 }
2698 } else {
2699 if (ty(IdxV) != MVT::i32)
2702 DAG.getConstant(ElemWidth, dl, MVT::i32));
2704 {VecV, WidthV, OffV});
2705 }
2706
2707
2708 ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy));
2710 return ExtV;
2711}
2712
2714HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV,
2717
2718
2719 MVT VecTy = ty(VecV);
2723 "Vector elements should equal vector width size");
2724 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
2725
2726
2728
2729
2730
2732 }
2733
2734
2735 if (ValWidth == 1) {
2736 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2740 }
2741
2742
2743
2744
2745
2746 unsigned Scale = VecWidth / ValWidth;
2747
2748
2749
2750 assert(ty(IdxV) == MVT::i32);
2751 unsigned VecRep = 8 / VecWidth;
2753 DAG.getConstant(8*VecRep, dl, MVT::i32));
2756 while (Scale > 1) {
2757
2758
2759 T1 = LoHalf(T1, DAG);
2760 T1 = expandPredicate(T1, dl, DAG);
2761 Scale /= 2;
2762 }
2763
2765}
2766
2771 MVT VecTy = ty(VecV);
2773 return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG);
2774
2777 assert(VecWidth == 32 || VecWidth == 64);
2778 assert((VecWidth % ValWidth) == 0);
2779
2780
2782
2783
2786 VecV = DAG.getBitcast(ScalarTy, VecV);
2787 if (VW != VecWidth)
2789
2792
2793 if (ConstantSDNode *C = dyn_cast(IdxV)) {
2794 unsigned W = C->getZExtValue() * ValWidth;
2797 {VecV, ValV, WidthV, OffV});
2798 } else {
2799 if (ty(IdxV) != MVT::i32)
2803 {VecV, ValV, WidthV, OffV});
2804 }
2805
2807}
2808
2810HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV,
2813 MVT VecTy = ty(VecV);
2815
2816 if (ValTy == MVT::i1) {
2817 SDValue ToReg = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2823 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Ins}, DAG);
2824 }
2825
2830
2833
2834 for (unsigned R = Scale; R > 1; R /= 2) {
2835 ValR = contractPredicate(ValR, dl, DAG);
2836 ValR = getCombine(DAG.getUNDEF(MVT::i32), ValR, dl, MVT::i64, DAG);
2837 }
2838
2845}
2846
2848HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
2850 assert(ty(Vec32).getSizeInBits() == 32);
2851 if (isUndef(Vec32))
2852 return DAG.getUNDEF(MVT::i64);
2856}
2857
2859HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
2861 assert(ty(Vec64).getSizeInBits() == 64);
2862 if (isUndef(Vec64))
2863 return DAG.getUNDEF(MVT::i32);
2864
2867 {0, 2, 4, 6, 1, 3, 5, 7});
2868 return extractVector(S, DAG.getConstant(0, dl, MVT::i32), dl, MVT::v4i8,
2869 MVT::i32, DAG);
2870}
2871
2874 const {
2877 if (W <= 64)
2880 }
2881
2887}
2888
2891 const {
2892 MVT ValTy = ty(Val);
2894
2897 if (ValLen == ResLen)
2898 return Val;
2899
2900 const SDLoc &dl(Val);
2901 assert(ValLen < ResLen);
2902 assert(ResLen % ValLen == 0);
2903
2905 for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i)
2907
2909}
2910
2916
2922 }
2923
2931}
2932
2941
2942 if (BW == 32)
2943 return buildVector32(Ops, dl, VecTy, DAG);
2944 if (BW == 64)
2945 return buildVector64(Ops, dl, VecTy, DAG);
2946
2947 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2948
2949 bool All0 = true, All1 = true;
2951 auto *CN = dyn_cast(P.getNode());
2952 if (CN == nullptr) {
2953 All0 = All1 = false;
2954 break;
2955 }
2956 uint32_t C = CN->getZExtValue();
2957 All0 &= (C == 0);
2958 All1 &= (C == 1);
2959 }
2960 if (All0)
2962 if (All1)
2964
2965
2966
2967
2969 SDValue Z = getZero(dl, MVT::i32, DAG);
2970
2972 for (unsigned i = 0; i != 8; ++i) {
2974 Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
2975 }
2977 for (unsigned i = 0, e = A.size()/2; i != e; ++i)
2978 Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]);
2979 }
2980
2981 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
2982 }
2983
2985}
2986
2994 return getCombine(Op.getOperand(1), Op.getOperand(0), dl, VecTy, DAG);
2995 }
2996
2998 if (ElemTy == MVT::i1) {
2999 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
3000 MVT OpTy = ty(Op.getOperand(0));
3001
3002
3005
3006
3007
3008
3009
3011 unsigned IdxW = 0;
3012
3013 for (SDValue P : Op.getNode()->op_values()) {
3015 for (unsigned R = Scale; R > 1; R /= 2) {
3016 W = contractPredicate(W, dl, DAG);
3017 W = getCombine(DAG.getUNDEF(MVT::i32), W, dl, MVT::i64, DAG);
3018 }
3019 W = LoHalf(W, DAG);
3021 }
3022
3023 while (Scale > 2) {
3025 Words[IdxW ^ 1].clear();
3026
3027 for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
3028 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
3029
3031 {W0, W1, WidthV, WidthV});
3033 }
3034 IdxW ^= 1;
3035 Scale /= 2;
3036 }
3037
3038
3039 assert(Scale == 2 && Words[IdxW].size() == 2);
3040
3041 SDValue WW = getCombine(Words[IdxW][1], Words[IdxW][0], dl, MVT::i64, DAG);
3043 }
3044
3046}
3047
3053 return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);
3054}
3055
3059 return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
3061}
3062
3066 return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),
3068}
3069
3073 SDValue ValV = Op.getOperand(1);
3074 return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
3075 SDLoc(Op), ty(ValV), DAG);
3076}
3077
3078bool
3080
3081
3083 return false;
3084
3085
3086
3087
3089}
3090
3095 LoadSDNode *LN = cast(Op.getNode());
3098
3099 bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;
3100 if (LoadPred) {
3106 LN = cast(NL.getNode());
3107 }
3108
3110 if (!validateConstPtrAlignment(LN->getBasePtr(), ClaimAlign, dl, DAG))
3111 return replaceMemWithUndef(Op, DAG);
3112
3113
3114
3116 if (LoadPred) {
3117 SDValue TP = getInstr(Hexagon::C2_tfrrp, dl, MemTy, {LU}, DAG);
3122 }
3123 SDValue Ch = cast(LU.getNode())->getChain();
3125 }
3126 return LU;
3127}
3128
3132 StoreSDNode *SN = cast(Op.getNode());
3134 MVT Ty = ty(Val);
3135
3136 if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {
3137
3138 SDValue TR = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {Val}, DAG);
3144 }
3145 SN = cast(NS.getNode());
3146 }
3147
3149 if (!validateConstPtrAlignment(SN->getBasePtr(), ClaimAlign, dl, DAG))
3150 return replaceMemWithUndef(Op, DAG);
3151
3154 if (ClaimAlign < NeedAlign)
3157}
3158
3161 const {
3162 LoadSDNode *LN = cast(Op.getNode());
3166 if (HaveAlign >= NeedAlign)
3167 return Op;
3168
3172
3173
3174
3175 bool DoDefault = false;
3176
3178 DoDefault = true;
3179
3183 return Op;
3184 DoDefault = true;
3185 }
3186 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
3187
3190 DoDefault =
3192 }
3193 if (DoDefault) {
3196 }
3197
3198
3199
3200
3201
3202
3204
3205 unsigned LoadLen = NeedAlign;
3208 auto BO = getBaseAndOffset(Base);
3209 unsigned BaseOpc = BO.first.getOpcode();
3211 return Op;
3212
3213 if (BO.second % LoadLen != 0) {
3214 BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first,
3215 DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
3216 BO.second -= BO.second % LoadLen;
3217 }
3220 DAG.getConstant(NeedAlign, dl, MVT::i32))
3221 : BO.first;
3226
3231 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),
3232 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
3233 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
3234 }
3235
3236 SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
3237 SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);
3238
3240 {Load1, Load0, BaseNoOff.getOperand(0)});
3244 return M;
3245}
3246
3249 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
3250 auto *CY = dyn_cast(Y);
3251 if (!CY)
3253
3255 SDVTList VTs = Op.getNode()->getVTList();
3258 unsigned Opc = Op.getOpcode();
3259
3260 if (CY) {
3261 uint64_t VY = CY->getZExtValue();
3262 assert(VY != 0 && "This should have been folded");
3263
3264 if (VY != 1)
3266
3272 }
3278 }
3279 }
3280
3282}
3283
3287 unsigned Opc = Op.getOpcode();
3288 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);
3289
3292 { X, Y, C });
3293
3294 EVT CarryTy = C.getValueType();
3296 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
3300}
3301
3304 SDValue Chain = Op.getOperand(0);
3306 SDValue Handler = Op.getOperand(2);
3309
3310
3314
3315 unsigned OffsetReg = Hexagon::R28;
3316
3322
3323
3324
3325
3327}
3328
3331 unsigned Opc = Op.getOpcode();
3332
3333
3336
3337 if (isHvxOperation(Op.getNode(), DAG)) {
3338
3339 if (SDValue V = LowerHvxOperation(Op, DAG))
3340 return V;
3341 }
3342
3343 switch (Opc) {
3344 default:
3345#ifndef NDEBUG
3346 Op.getNode()->dumpr(&DAG);
3348 errs() << "Error: check for a non-legal type in this operation\n";
3349#endif
3351
3392 break;
3393 }
3394
3396}
3397
3398void
3402 if (isHvxOperation(N, DAG)) {
3403 LowerHvxOperationWrapper(N, Results, DAG);
3405 return;
3406 }
3407
3409 unsigned Opc = N->getOpcode();
3410
3411 switch (Opc) {
3414 Results.push_back(opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG));
3415 break;
3417
3418
3419
3420
3421
3422 return;
3423 default:
3425 break;
3426 }
3427}
3428
3429void
3433 if (isHvxOperation(N, DAG)) {
3434 ReplaceHvxNodeResults(N, Results, DAG);
3436 return;
3437 }
3438
3440 switch (N->getOpcode()) {
3444 return;
3446
3447 if (N->getValueType(0) == MVT::i8) {
3448 if (N->getOperand(0).getValueType() == MVT::v8i1) {
3449 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
3450 N->getOperand(0), DAG);
3453 }
3454 }
3455 break;
3456 }
3457}
3458
3462 if (isHvxOperation(N, DCI.DAG)) {
3463 if (SDValue V = PerformHvxDAGCombine(N, DCI))
3464 return V;
3466 }
3467
3470 unsigned Opc = Op.getOpcode();
3471
3474
3476 EVT TruncTy = Op.getValueType();
3478
3480 return Elem0;
3481
3484 }
3485 }
3486
3489
3492 switch (P.getOpcode()) {
3496 return getZero(dl, ty(Op), DCI.DAG);
3497 default:
3498 break;
3499 }
3501
3502
3503
3506 SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);
3509 Op.getOperand(2), Op.getOperand(1));
3510 return VSel;
3511 }
3512 }
3515
3519
3520 if (ty(Elem0) == TruncTy)
3521 return Elem0;
3522
3523 if (ty(Elem0).bitsGT(TruncTy))
3525 }
3526 } else if (Opc == ISD::OR) {
3527
3528
3529 auto fold0 = [&, this](SDValue Op) {
3530 if (ty(Op) != MVT::i64)
3536
3539
3541 auto *Amt = dyn_cast(Shl.getOperand(1));
3542 if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) {
3543 unsigned A = Amt->getZExtValue();
3550 }
3552 };
3553
3555 return R;
3556 }
3557
3559}
3560
3561
3565 int Idx = cast(Table)->getIndex();
3569}
3570
3571
3572
3573
3574
3577 if (Constraint.size() == 1) {
3578 switch (Constraint[0]) {
3579 case 'q':
3580 case 'v':
3583 break;
3584 case 'a':
3586 default:
3587 break;
3588 }
3589 }
3591}
3592
3593std::pair<unsigned, const TargetRegisterClass*>
3596
3597 if (Constraint.size() == 1) {
3598 switch (Constraint[0]) {
3599 case 'r':
3601 default:
3602 return {0u, nullptr};
3603 case MVT::i1:
3604 case MVT::i8:
3605 case MVT::i16:
3606 case MVT::i32:
3607 case MVT::f32:
3608 return {0u, &Hexagon::IntRegsRegClass};
3609 case MVT::i64:
3610 case MVT::f64:
3611 return {0u, &Hexagon::DoubleRegsRegClass};
3612 }
3613 break;
3614 case 'a':
3615 if (VT != MVT::i32)
3616 return {0u, nullptr};
3617 return {0u, &Hexagon::ModRegsRegClass};
3618 case 'q':
3620 default:
3621 return {0u, nullptr};
3622 case 64:
3623 case 128:
3624 return {0u, &Hexagon::HvxQRRegClass};
3625 }
3626 break;
3627 case 'v':
3629 default:
3630 return {0u, nullptr};
3631 case 512:
3632 return {0u, &Hexagon::HvxVRRegClass};
3633 case 1024:
3635 return {0u, &Hexagon::HvxVRRegClass};
3636 return {0u, &Hexagon::HvxWRRegClass};
3637 case 2048:
3638 return {0u, &Hexagon::HvxWRRegClass};
3639 }
3640 break;
3641 default:
3642 return {0u, nullptr};
3643 }
3644 }
3645
3647}
3648
3649
3650
3651
3653 bool ForCodeSize) const {
3654 return true;
3655}
3656
3657
3658
3663
3664
3665
3666
3667
3668
3669
3670 Align A = DL.getABITypeAlign(Ty);
3671
3673 return false;
3674
3676 return false;
3677 }
3678
3679
3681 return false;
3682
3683 int Scale = AM.Scale;
3684 if (Scale < 0)
3685 Scale = -Scale;
3686 switch (Scale) {
3687 case 0:
3688 break;
3689 default:
3690 return false;
3691 }
3692 return true;
3693}
3694
3695
3696
3698 const {
3700}
3701
3702
3703
3704
3705
3707 return Imm >= -512 && Imm <= 511;
3708}
3709
3710
3711
3712
3716 bool IsVarArg,
3717 bool IsCalleeStructRet,
3718 bool IsCallerStructRet,
3725 bool CCMatch = CallerCC == CalleeCC;
3726
3727
3728
3729
3730
3731
3732
3733 if (!isa(Callee) &&
3734 !isa(Callee)) {
3735 return false;
3736 }
3737
3738
3739
3740 if (!CCMatch) {
3743
3744 if (!R || !E)
3745 return false;
3746 }
3747
3748
3749 if (IsVarArg)
3750 return false;
3751
3752
3753
3754 if (IsCalleeStructRet || IsCallerStructRet)
3755 return false;
3756
3757
3758
3759
3760
3761 return true;
3762}
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3777 if (Op.size() >= 8 && Op.isAligned(Align(8)))
3778 return MVT::i64;
3779 if (Op.size() >= 4 && Op.isAligned(Align(4)))
3780 return MVT::i32;
3781 if (Op.size() >= 2 && Op.isAligned(Align(2)))
3782 return MVT::i16;
3783 return MVT::Other;
3784}
3785
3790 return false;
3793 return allowsHvxMemoryAccess(SVT, Flags, Fast);
3795 Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
3796}
3797
3800 unsigned *Fast) const {
3802 return false;
3805 return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
3808 return false;
3809}
3810
3811std::pair<const TargetRegisterClass*, uint8_t>
3813 MVT VT) const {
3817
3819 return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3821 return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3823 return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3824 }
3825
3827}
3828
3831
3833 return false;
3834
3835 auto *L = cast(Load);
3836 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
3837
3839 return false;
3840 if (GlobalAddressSDNode *GA = dyn_cast(BO.first)) {
3842 const auto *GO = dyn_cast_or_null(GA->getGlobal());
3844 }
3845 return true;
3846}
3847
3849 SDNode *Node) const {
3850 AdjustHvxInstrPostInstrSelection(MI, Node);
3851}
3852
3857 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
3858 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3859 : Intrinsic::hexagon_L4_loadd_locked;
3860
3862 Builder.CreateIntrinsic(IntID, {}, Addr, nullptr, "larx");
3863
3865}
3866
3867
3868
3876
3878 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
3879 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3880 : Intrinsic::hexagon_S4_stored_locked;
3881
3883
3885 nullptr, "stcx");
3888 return Ext;
3889}
3890
3893
3897}
3898
3901
3902 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
3905}
3906
3911}
unsigned const MachineRegisterInfo * MRI
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< int > MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memcpy"))
static Value * getUnderLyingObjectForBrevLdIntr(Value *V)
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static cl::opt< bool > AlignLoads("hexagon-align-loads", cl::Hidden, cl::init(false), cl::desc("Rewrite unaligned loads as a pair of aligned loads"))
static bool isBrevLdIntrinsic(const Value *Inst)
static cl::opt< int > MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memmove"))
static cl::opt< int > MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memmove"))
static Value * getBrevLdObject(Value *V)
static cl::opt< int > MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden, cl::init(false), cl::desc("Disable minimum alignment of 1 for " "arguments passed by value on stack"))
static Value * returnEdge(const PHINode *PN, Value *IntrBaseVal)
static cl::opt< int > MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memcpy"))
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
static cl::opt< int > MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden, cl::desc("Control jump table emission on Hexagon target"))
static cl::opt< int > MinimumJumpTables("minimum-jump-tables", cl::Hidden, cl::init(5), cl::desc("Set minimum jump tables"))
static cl::opt< bool > EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden, cl::desc("Enable Hexagon SDNode scheduling"))
static cl::opt< bool > EnableFastMath("ffast-math", cl::Hidden, cl::desc("Enable Fast Math processing"))
#define Hexagon_PointerSize
#define HEXAGON_LRFP_SIZE
#define HEXAGON_GOT_SYM_NAME
Module.h This file contains the declarations for the Module class.
std::pair< MCSymbol *, MachineModuleInfoImpl::StubValueTy > PairTy
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static llvm::Type * getVectorElementType(llvm::Type *Ty)
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
The address of a basic block.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
FunctionType * getFunctionType() const
This class represents a function call, abstracting a target machine's calling convention.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
const APInt & getValue() const
Return the constant as an APInt value reference.
MachineConstantPoolValue * getMachineCPVal() const
bool isMachineConstantPoolEntry() const
const Constant * getConstVal() const
int64_t getSExtValue() const
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
This is the base abstract class for diagnostic reporting in the backend.
Interface for custom diagnostic printing.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
const GlobalObject * getAliaseeObject() const
bool isValidAutoIncImm(const EVT VT, const int Offset) const
Hexagon target-specific information for each MachineFunction.
int getVarArgsFrameIndex()
void setFirstNamedArgFrameIndex(int v)
void setHasEHReturn(bool H=true)
Register getStackRegister() const
Register getFrameRegister(const MachineFunction &MF) const override
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const HexagonInstrInfo * getInstrInfo() const override
const HexagonFrameLowering * getFrameLowering() const override
bool useSmallData() const
const HexagonRegisterInfo * getRegisterInfo() const override
bool isHVXVectorType(EVT VecTy, bool IncludeBool=false) const
Align getTypeAlignment(MVT Ty) const
unsigned getVectorLength() const
bool useHVX128BOps() const
bool isEnvironmentMusl() const
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
bool isTargetCanonicalConstantNode(SDValue Op) const override
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const
AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, SDValue InGlue, EVT PtrVT, unsigned ReturnReg, unsigned char OperandGlues) const
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
SDValue LowerCallResult(SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals, const SmallVectorImpl< SDValue > &OutVals, SDValue Callee) const
LowerCallResult - Lower the result values of an ISD::CALL into the appropriate copies out of appropri...
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Return true if the target supports a memory access of this type for the given address space and align...
SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool isShuffleMaskLegal(ArrayRef< int > Mask, EVT VT) const override
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
LegalizeAction getCustomOperationAction(SDNode &Op) const override
How to legalize this custom operation?
SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerUAddSubOCarry(SDValue Op, SelectionDAG &DAG) const
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
LowerCall - Functions arguments are copied from virtual regs to (physical regs)/(stack frame),...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const
SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
HexagonTargetLowering(const TargetMachine &TM, const HexagonSubtarget &ST)
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SmallVectorImpl< ISD::InputArg > &Ins, SelectionDAG &DAG) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &, EVT) const override
Return true if an FMA operation is faster than a pair of mul and add instructions.
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const override
Return true if it is profitable to reduce a load to a smaller type.
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const
SDValue LowerREADSTEADYCOUNTER(SDValue Op, SelectionDAG &DAG) const
HexagonTargetObjectFile * getObjFileLowering() const override
bool isGlobalInSmallSection(const GlobalObject *GO, const TargetMachine &TM) const
Return true if this global value should be placed into small data/bss section.
Common base class shared among various IRBuilders.
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
BasicBlock * GetInsertBlock() const
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Base class for LoadSDNode and StoreSDNode.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
unsigned getNumFixedObjects() const
Return the number of fixed objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual register for it.
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation suitable for instruction selection.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getRegister(Register Reg, EVT VT)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ UndefinedBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
unsigned getID() const
Return the register class ID number.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
@ MO_PCREL
MO_PCREL - On a symbol operand, indicates a PC-relative relocation Used for computing a global addres...
@ MO_GOT
MO_GOT - Indicates a GOT-relative relocation.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ GLOBAL_OFFSET_TABLE
The address of the GOT.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable, elements.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when considering SETFALSE (something that never exists dynamically) as 0.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
FormattedNumber format_hex(uint64_t N, unsigned Width, bool Upper=false)
format_hex - Output N as a fixed width hexadecimal.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
int getNextAvailablePluginDiagnosticKind()
Get the next available kind ID for a plugin diagnostic.
unsigned M0(unsigned Val)
constexpr unsigned BitWidth
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
unsigned Log2(Align A)
Returns the log2 of the alignment.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const