LLVM: lib/Target/AVR/AVRISelLowering.cpp Source File
1
2
3
4
5
6
7
8
9
10
11
12
13
15
26
32
33namespace llvm {
34
38
41
42
44
50
53
58
60
65 }
66 }
67
69
75 }
76
77
78
81
82
83
96
101
107
118
120
121
130
132
137
138
147 }
148
149
158
159
166
167
170
171
174
175
176
177 if (!Subtarget.supportsMultiplication()) {
180 }
181
185 }
186
191 }
192
195
196
197
198
199 }
200
201
208
209
214
215
218
221}
222
223const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
224#define NODE(name) \
225 case AVRISD::name: \
226 return #name
227
228 switch (Opcode) {
229 default:
230 return nullptr;
231 NODE(RET_GLUE);
232 NODE(RETI_GLUE);
234 NODE(WRAPPER);
243 NODE(LSLLOOP);
244 NODE(LSRLOOP);
245 NODE(ROLLOOP);
246 NODE(RORLOOP);
247 NODE(ASRLOOP);
248 NODE(BRCOND);
252 NODE(SELECT_CC);
253#undef NODE
254 }
255}
256
257EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
258 EVT VT) const {
259 assert(!VT.isVector() && "No AVR SetCC type for vectors!");
260 return MVT::i8;
261}
262
263SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
264 unsigned Opc8;
265 const SDNode *N = Op.getNode();
266 EVT VT = Op.getValueType();
269 "Expected power-of-2 shift amount");
270
272 if (!isa<ConstantSDNode>(N->getOperand(1))) {
273
274
276 }
277 SDVTList ResTys = DAG.getVTList(MVT::i16, MVT::i16);
278 SDValue SrcLo =
281 SDValue SrcHi =
284 uint64_t ShiftAmount = N->getConstantOperandVal(1);
285 if (ShiftAmount == 16) {
286
287
288
289
290 switch (Op.getOpcode()) {
292 SDValue Zero = DAG.getConstant(0, dl, MVT::i16);
294 }
298 }
299 }
300 }
302 unsigned Opc;
303 switch (Op.getOpcode()) {
304 default:
308 break;
311 break;
314 break;
315 }
316 SDValue Result = DAG.getNode(Opc, dl, ResTys, SrcLo, SrcHi, Cnt);
319 }
320
321
322 if (!isa<ConstantSDNode>(N->getOperand(1))) {
323 switch (Op.getOpcode()) {
324 default:
328 N->getOperand(1));
331 N->getOperand(1));
333 SDValue Amt = N->getOperand(1);
334 EVT AmtVT = Amt.getValueType();
338 }
340 SDValue Amt = N->getOperand(1);
341 EVT AmtVT = Amt.getValueType();
345 }
348 N->getOperand(1));
349 }
350 }
351
352 uint64_t ShiftAmount = N->getConstantOperandVal(1);
353 SDValue Victim = N->getOperand(0);
354
355 switch (Op.getOpcode()) {
358 break;
362 break;
366 break;
369 break;
372 break;
373 default:
375 }
376
377
379 if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
380
382 Victim =
384 ShiftAmount -= 4;
385 } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
386 ShiftAmount < 7) {
387
389 Victim =
391 ShiftAmount -= 4;
392 } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
393
396 ShiftAmount = 0;
397 } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
398
401 ShiftAmount = 0;
402 } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
403
406 ShiftAmount = 0;
407 } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
408
411 ShiftAmount = 0;
412 } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 3) {
413
416 ShiftAmount = 0;
417 } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 3) {
418
421 ShiftAmount = 0;
422 } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 7) {
423
425 ShiftAmount = 0;
426 } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 7) {
427
429 ShiftAmount = 0;
431 ShiftAmount >= 4) {
432
434 ShiftAmount -= 4;
435 }
438
439 switch (ShiftAmount) {
440 case 15:
443 ShiftAmount = 0;
444 break;
445 case 14:
448 ShiftAmount = 0;
449 break;
450 case 7:
453 ShiftAmount = 0;
454 break;
455 default:
456 break;
457 }
458 if (4 <= ShiftAmount && ShiftAmount < 8)
459 switch (Op.getOpcode()) {
463 ShiftAmount -= 4;
464 break;
468 ShiftAmount -= 4;
469 break;
470 default:
471 break;
472 }
473 else if (8 <= ShiftAmount && ShiftAmount < 12)
474 switch (Op.getOpcode()) {
478 ShiftAmount -= 8;
479
481 break;
485 ShiftAmount -= 8;
486
488 break;
492 ShiftAmount -= 8;
493
495 break;
496 default:
497 break;
498 }
499 else if (12 <= ShiftAmount)
500 switch (Op.getOpcode()) {
504 ShiftAmount -= 12;
505
507 break;
511 ShiftAmount -= 12;
512
514 break;
518 ShiftAmount -= 8;
519
521 break;
522 default:
523 break;
524 }
525 }
526
527 while (ShiftAmount--) {
528 Victim = DAG.getNode(Opc8, dl, VT, Victim);
529 }
530
531 return Victim;
532}
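// Note on the constant-shift path above (a summary, not taken verbatim from
// elided lines): the final loop emits one single-bit AVRISD shift node per
// remaining bit, so the earlier special cases exist to shrink ShiftAmount
// first. For example, a shift in the 4..6 range is started with an
// AVRISD::SWAP of the nibbles plus an AND mask so that at most two
// single-bit steps remain, and larger 16-bit amounts are reduced by the
// byte-move cases in the switch before the loop runs.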
533
534SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
535 unsigned Opcode = Op->getOpcode();
536 assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
537 "Invalid opcode for Div/Rem lowering");
538 bool IsSigned = Opcode == ISD::SDIVREM;
539 EVT VT = Op->getValueType(0);
540 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
541
542 RTLIB::Libcall LC;
543 switch (VT.getSimpleVT().SimpleTy) {
544 default:
545 llvm_unreachable("Unexpected request for libcall!");
546 case MVT::i8:
547 LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
548 break;
549 case MVT::i16:
550 LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
551 break;
552 case MVT::i32:
553 LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
554 break;
555 }
556
557 SDValue InChain = DAG.getEntryNode();
558
559 TargetLowering::ArgListTy Args;
560 TargetLowering::ArgListEntry Entry;
561 for (SDValue const &Value : Op->op_values()) {
562 Entry.Node = Value;
563 Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
564 Entry.IsSExt = IsSigned;
565 Entry.IsZExt = !IsSigned;
566 Args.push_back(Entry);
567 }
568
569 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
570 getPointerTy(DAG.getDataLayout()));
571
573
574 SDLoc dl(Op);
575 TargetLowering::CallLoweringInfo CLI(DAG);
576 CLI.setDebugLoc(dl)
577 .setChain(InChain)
578 .setLibCallee(getLibcallCallingConv(LC), Ty, Callee, std::move(Args))
579 .setInRegister()
580 .setSExtResult(IsSigned)
581 .setZExtResult(!IsSigned);
582
583 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
584 return CallInfo.first;
585}
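// LowerDivRem folds both results of an ISD::SDIVREM/UDIVREM node into a
// single runtime call, since AVR has no hardware divide: an i16 signed
// division and remainder of the same operands share one RTLIB::SDIVREM_I16
// call whose combined result carries quotient and remainder. The actual
// symbol comes from the libcall table configured elsewhere in this file;
// on AVR that is typically the avr-libgcc __divmod*4 family.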
586
587SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
588 SelectionDAG &DAG) const {
589 auto DL = DAG.getDataLayout();
590
591 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
592 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
593
594
598}
599
600SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
601 SelectionDAG &DAG) const {
602 auto DL = DAG.getDataLayout();
603 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
604
606
608}
609
610
612 switch (CC) {
613 default:
627 }
628}
629
630
631SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
632 SelectionDAG &DAG, SDLoc DL) const {
633 assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
634 "LHS and RHS have different types");
635 assert(((LHS.getSimpleValueType() == MVT::i16) ||
636 (LHS.getSimpleValueType() == MVT::i8)) &&
637 "invalid comparison type");
638
639 SDValue Cmp;
640
641 if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
643
644
646 DAG.getIntPtrConstant(0, DL));
648 DAG.getIntPtrConstant(1, DL));
649 SDValue RHSlo = (Imm & 0xff) == 0
652 DAG.getIntPtrConstant(0, DL));
653 SDValue RHShi = (Imm & 0xff00) == 0
656 DAG.getIntPtrConstant(1, DL));
657 Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
658 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
659 } else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
660
661
663 SDValue LHSlo = (Imm & 0xff) == 0
666 DAG.getIntPtrConstant(0, DL));
667 SDValue LHShi = (Imm & 0xff00) == 0
670 DAG.getIntPtrConstant(1, DL));
672 DAG.getIntPtrConstant(0, DL));
674 DAG.getIntPtrConstant(1, DL));
676 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
677 } else {
678
680 }
681
682 return Cmp;
683}
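// getAVRCmp models AVR's compare sequence in the DAG: the low bytes go
// through AVRISD::CMP and each higher byte through AVRISD::CMPC
// (compare-with-carry), the equivalent of a CP/CPC chain at the instruction
// level. When a 16-bit operand is a constant whose low or high byte is zero,
// that byte is compared against the subtarget's zero register instead of
// materializing an immediate, which is what the (Imm & 0xff) and
// (Imm & 0xff00) special cases above implement.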
684
685
686
688 SDValue &AVRcc, SelectionDAG &DAG,
689 SDLoc DL) const {
690 SDValue Cmp;
691 EVT VT = LHS.getValueType();
692 bool UseTest = false;
693
694 switch (CC) {
695 default:
696 break;
698
701 break;
702 }
704 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
705 switch (C->getSExtValue()) {
706 case -1: {
707
708
709 UseTest = true;
711 break;
712 }
713 case 0: {
714
715
717 LHS = DAG.getConstant(0, DL, VT);
719 break;
720 }
721 default: {
722
723
724 RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
726 break;
727 }
728 }
729 break;
730 }
731
734 break;
735 }
737 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
738 switch (C->getSExtValue()) {
739 case 1: {
740
741
743 LHS = DAG.getConstant(0, DL, VT);
745 break;
746 }
747 case 0: {
748
749
750 UseTest = true;
752 break;
753 }
754 }
755 }
756 break;
757 }
759
762 break;
763 }
765
766
767 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
768 RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
770 break;
771 }
772
775 break;
776 }
777 }
778
779
780
781 if (VT == MVT::i32) {
783 DAG.getIntPtrConstant(0, DL));
785 DAG.getIntPtrConstant(1, DL));
787 DAG.getIntPtrConstant(0, DL));
789 DAG.getIntPtrConstant(1, DL));
790
791 if (UseTest) {
792
794 DAG.getIntPtrConstant(1, DL));
796 } else {
797 Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
798 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
799 }
800 } else if (VT == MVT::i64) {
802 DAG.getIntPtrConstant(0, DL));
804 DAG.getIntPtrConstant(1, DL));
805
807 DAG.getIntPtrConstant(0, DL));
809 DAG.getIntPtrConstant(1, DL));
811 DAG.getIntPtrConstant(0, DL));
813 DAG.getIntPtrConstant(1, DL));
814
816 DAG.getIntPtrConstant(0, DL));
818 DAG.getIntPtrConstant(1, DL));
819
821 DAG.getIntPtrConstant(0, DL));
823 DAG.getIntPtrConstant(1, DL));
825 DAG.getIntPtrConstant(0, DL));
827 DAG.getIntPtrConstant(1, DL));
828
829 if (UseTest) {
830
832 DAG.getIntPtrConstant(1, DL));
834 } else {
835 Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
839 }
840 } else if (VT == MVT::i8 || VT == MVT::i16) {
841 if (UseTest) {
842
844 (VT == MVT::i8)
847 LHS, DAG.getIntPtrConstant(1, DL)));
848 } else {
850 }
851 } else {
853 }
854
855
856 if (!UseTest) {
858 }
859
860 return Cmp;
861}
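// This overload additionally rewrites the condition so that only codes the
// AVR branch instructions handle cheaply survive. For example, a signed
// "x > -1" becomes a sign-bit test (the UseTest path, lowered through
// AVRISD::TST on the most significant byte), and "x > C" for another
// constant C is rewritten as "x >= C + 1"; analogous rewrites cover the
// unsigned cases. The chosen AVR condition code is returned through AVRcc.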
862
863SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
864 SDValue Chain = Op.getOperand(0);
865 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
866 SDValue LHS = Op.getOperand(2);
867 SDValue RHS = Op.getOperand(3);
868 SDValue Dest = Op.getOperand(4);
869 SDLoc dl(Op);
870
871 SDValue TargetCC;
872 SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
873
874 return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
875 Cmp);
876}
877
878SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
879 SDValue LHS = Op.getOperand(0);
880 SDValue RHS = Op.getOperand(1);
881 SDValue TrueV = Op.getOperand(2);
882 SDValue FalseV = Op.getOperand(3);
883 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
884 SDLoc dl(Op);
885
886 SDValue TargetCC;
887 SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
888
889 SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
890
892}
893
894SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
895 SDValue LHS = Op.getOperand(0);
896 SDValue RHS = Op.getOperand(1);
897 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
899
900 SDValue TargetCC;
901 SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL);
902
903 SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
904 SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
905 SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
906
908}
909
910SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
911 const MachineFunction &MF = DAG.getMachineFunction();
912 const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
913 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
914 auto DL = DAG.getDataLayout();
915 SDLoc dl(Op);
916
917
918
919 SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));
920
921 return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
922 MachinePointerInfo(SV));
923}
924
925
926SDValue AVRTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
930
931
932 return Op;
933 }
934
935
936
937
938
939
940
941 SDLoc dl(Op);
942 SmallVector<SDValue, 8> Ops;
943 SDNode *N = Op.getNode();
944 SDValue Glue;
945 for (unsigned I = 0; I < N->getNumOperands(); I++) {
946 SDValue Operand = N->getOperand(I);
947 if (Operand.getValueType() == MVT::Glue) {
948
949
950 Glue = Operand;
951 } else {
952 Ops.push_back(Operand);
953 }
954 }
956 Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));
957 Ops.push_back(ZeroReg);
958 if (Glue) {
959 Ops.push_back(Glue);
960 }
961
962
963
964 SDValue New = DAG.getNode(N->getOpcode(), dl, N->getVTList(), Ops);
965 DAG.ReplaceAllUsesOfValueWith(Op, New);
966 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));
967
968 return New;
969}
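// LowerINLINEASM rebuilds the INLINEASM node with one extra operand pair: a
// generic inline-asm flag word followed by the subtarget's zero register
// (see AVRSubtarget::getZeroRegister()). Making that register an explicit
// operand keeps it visible to the passes that run after lowering, so inline
// assembly that relies on the zero register is handled correctly; the early
// return above skips the rewrite when it is not needed.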
970
971SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
972 switch (Op.getOpcode()) {
973 default:
980 return LowerShifts(Op, DAG);
982 return LowerGlobalAddress(Op, DAG);
984 return LowerBlockAddress(Op, DAG);
986 return LowerBR_CC(Op, DAG);
988 return LowerSELECT_CC(Op, DAG);
990 return LowerSETCC(Op, DAG);
992 return LowerVASTART(Op, DAG);
995 return LowerDivRem(Op, DAG);
997 return LowerINLINEASM(Op, DAG);
998 }
999
1001}
1002
1003
1004
1009
1010 switch (N->getOpcode()) {
1012
1013 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
1015 ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
1016 DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
1018 }
1019 break;
1020 }
1021 default: {
1023
1026
1027 break;
1028 }
1029 }
1030}
1031
1032
1033
1034bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1035 const AddrMode &AM, Type *Ty,
1036 unsigned AS,
1037 Instruction *I) const {
1038 int64_t Offs = AM.BaseOffs;
1039
1040
1042 return true;
1043 }
1044
1045
1047 return false;
1048 }
1049
1050
1051 if (Offs < 0)
1052 Offs = -Offs;
1053 if (AM.BaseGV == nullptr && AM.HasBaseReg && AM.Scale == 0 &&
1054 isUInt<6>(Offs)) {
1055 return true;
1056 }
1057
1058 return false;
1059}
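// The final check above is the important one for AVR: a base register plus an
// unscaled constant displacement is only legal when the displacement fits in
// 6 bits (0..63), which is what the LDD/STD addressing forms can encode. For
// example, accessing a struct field at offset 10 through a pointer can keep
// the reg+10 form, while an offset of 200 must first be folded into the
// pointer. (Instruction names are for illustration only; this hook merely
// classifies AddrMode combinations.)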
1060
1061
1062
1063
1071
1072 if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1073 VT = LD->getMemoryVT();
1074 Op = LD->getBasePtr().getNode();
1076 return false;
1078 return false;
1079 }
1080 } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
1081 VT = ST->getMemoryVT();
1082 Op = ST->getBasePtr().getNode();
1084 return false;
1085 }
1086 } else {
1087 return false;
1088 }
1089
1090 if (VT != MVT::i8 && VT != MVT::i16) {
1091 return false;
1092 }
1093
1095 return false;
1096 }
1097
1098 if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
1099 int RHSC = RHS->getSExtValue();
1101 RHSC = -RHSC;
1102
1103 if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
1104 return false;
1105 }
1106
1107 Base = Op->getOperand(0);
1110
1111 return true;
1112 }
1113
1114 return false;
1115}
1116
1117
1118
1119
1127
1128 if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1129 VT = LD->getMemoryVT();
1131 return false;
1132 } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
1133 VT = ST->getMemoryVT();
1134
1136 return false;
1137
1138
1139
1140
1141 if (VT == MVT::i16 && !Subtarget.hasLowByteFirst())
1142 return false;
1143 } else {
1144 return false;
1145 }
1146
1147 if (VT != MVT::i8 && VT != MVT::i16) {
1148 return false;
1149 }
1150
1152 return false;
1153 }
1154
1155 if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
1156 int RHSC = RHS->getSExtValue();
1158 RHSC = -RHSC;
1159 if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
1160 return false;
1161 }
1162
1163
1164
1165 if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
1167 return false;
1168
1169 Base = Op->getOperand(0);
1172
1173 return true;
1174 }
1175
1176 return false;
1177}
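// Together, getPreIndexedAddressParts and getPostIndexedAddressParts let the
// DAG combiner form AVR's auto-decrement/auto-increment accesses (the
// "-X"/"X+" style pointer updates): the folded offset must equal the access
// size, i.e. -1/+1 for an i8 access and -2/+2 for an i16 access, and 16-bit
// post-incremented stores are rejected on subtargets without the
// low-byte-first store order.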
1178
1179bool AVRTargetLowering::isOffsetFoldingLegal(
1180 const GlobalAddressSDNode *GA) const {
1181 return true;
1182}
1183
1184
1185
1186
1187
1188#include "AVRGenCallingConv.inc"
1189
1190
1191
1192static const MCPhysReg RegList8AVR[] = {
1193 AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
1194 AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
1195 AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9, AVR::R8};
1196static const MCPhysReg RegList8Tiny[] = {AVR::R25, AVR::R24, AVR::R23,
1197 AVR::R22, AVR::R21, AVR::R20};
1198static const MCPhysReg RegList16AVR[] = {
1199 AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
1200 AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
1201 AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
1202 AVR::R11R10, AVR::R10R9, AVR::R9R8};
1203static const MCPhysReg RegList16Tiny[] = {AVR::R26R25, AVR::R25R24,
1204 AVR::R24R23, AVR::R23R22,
1205 AVR::R22R21, AVR::R21R20};
1206
1208 "8-bit and 16-bit register arrays must be of equal length");
1210 "8-bit and 16-bit register arrays must be of equal length");
1211
1212
1213
1214
1215
1216template <typename ArgT>
1217static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI,
1218 const Function *F, const DataLayout *TD,
1219 const SmallVectorImpl<ArgT> &Args,
1220 SmallVectorImpl<CCValAssign> &ArgLocs,
1221 CCState &CCInfo, bool Tiny) {
1222
1225 if (Tiny) {
1228 } else {
1231 }
1232
1233 unsigned NumArgs = Args.size();
1234
1235
1236 int RegLastIdx = -1;
1237
1238 bool UseStack = false;
1239 for (unsigned i = 0; i != NumArgs;) {
1240 MVT VT = Args[i].VT;
1241
1242
1243
1244
1245 unsigned ArgIndex = Args[i].OrigArgIndex;
1247 unsigned j = i + 1;
1248 for (; j != NumArgs; ++j) {
1249 if (Args[j].OrigArgIndex != ArgIndex)
1250 break;
1251 TotalBytes += Args[j].VT.getStoreSize();
1252 }
1253
1254 TotalBytes = alignTo(TotalBytes, 2);
1255
1256 if (TotalBytes == 0)
1257 continue;
1258
1259 unsigned RegIdx = RegLastIdx + TotalBytes;
1260 RegLastIdx = RegIdx;
1261
1262 if (RegIdx >= RegList8.size()) {
1263 UseStack = true;
1264 }
1265 for (; i != j; ++i) {
1266 MVT VT = Args[i].VT;
1267
1268 if (UseStack) {
1274 } else {
1275 unsigned Reg;
1276 if (VT == MVT::i8) {
1277 Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
1278 } else if (VT == MVT::i16) {
1279 Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
1280 } else {
1282 "calling convention can only manage i8 and i16 types");
1283 }
1284 assert(Reg && "register not available in calling convention");
1286
1287
1289 }
1290 }
1291 }
1292}
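// analyzeArguments walks the (possibly split) arguments in source order and
// hands out registers from the high end of the lists downwards, two bytes at
// a time, falling back to the stack once the register file is exhausted.
// A minimal sketch of the resulting assignment, assuming the regular
// (non-Tiny) register lists above:
//
//   void f(int a, char b);  // a -> R25:R24, b -> R22 (R23 is skipped by the
//                           // two-byte alignment), later arguments downward
//
// Anything that no longer fits is given a stack slot via AllocateStack.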
1293
1294
1295template <typename ArgT>
1296static unsigned
1297getTotalArgumentsSizeInBytes(const SmallVectorImpl<ArgT> &Args) {
1298 unsigned TotalBytes = 0;
1299
1300 for (const ArgT &Arg : Args) {
1301 TotalBytes += Arg.VT.getStoreSize();
1302 }
1303 return TotalBytes;
1304}
1305
1306
1307
1308
1309template <typename ArgT>
1310static void analyzeReturnValues(const SmallVectorImpl<ArgT> &Args,
1311 CCState &CCInfo, bool Tiny) {
1312 unsigned NumArgs = Args.size();
1314
1315 if (Tiny)
1316 assert(TotalBytes <= 4 &&
1317 "return values greater than 4 bytes cannot be lowered on AVRTiny");
1318 else
1319 assert(TotalBytes <= 8 &&
1320 "return values greater than 8 bytes cannot be lowered on AVR");
1321
1322
1325 if (Tiny) {
1328 } else {
1331 }
1332
1333
1334
1335 if (TotalBytes > 4) {
1336 TotalBytes = 8;
1337 } else {
1338 TotalBytes = alignTo(TotalBytes, 2);
1339 }
1340
1341
1342 int RegIdx = TotalBytes - 1;
1343 for (unsigned i = 0; i != NumArgs; ++i) {
1344 MVT VT = Args[i].VT;
1345 unsigned Reg;
1346 if (VT == MVT::i8) {
1347 Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
1348 } else if (VT == MVT::i16) {
1349 Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
1350 } else {
1351 llvm_unreachable("calling convention can only manage i8 and i16 types");
1352 }
1353 assert(Reg && "register not available in calling convention");
1355
1357 }
1358}
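// analyzeReturnValues uses the same register lists but indexes them from
// TotalBytes - 1 downwards, so (on non-Tiny parts, as a sketch) an i8 result
// comes back in R24, an i16 result in R25:R24 and an i32 result in R25..R22,
// matching the aligned-to-2 sizing computed above.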
1359
1360SDValue AVRTargetLowering::LowerFormalArguments(
1361 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1362 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1363 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1364 MachineFunction &MF = DAG.getMachineFunction();
1365 MachineFrameInfo &MFI = MF.getFrameInfo();
1366 auto DL = DAG.getDataLayout();
1367
1368
1369 SmallVector<CCValAssign, 16> ArgLocs;
1370 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1371 *DAG.getContext());
1372
1373
1374 if (isVarArg) {
1375 CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
1376 } else {
1377 analyzeArguments(nullptr, &MF.getFunction(), &DL, Ins, ArgLocs, CCInfo,
1378 Subtarget.hasTinyEncoding());
1379 }
1380
1381 SDValue ArgValue;
1382 for (CCValAssign &VA : ArgLocs) {
1383
1384
1385 if (VA.isRegLoc()) {
1386 EVT RegVT = VA.getLocVT();
1387 const TargetRegisterClass *RC;
1388 if (RegVT == MVT::i8) {
1389 RC = &AVR::GPR8RegClass;
1390 } else if (RegVT == MVT::i16) {
1391 RC = &AVR::DREGSRegClass;
1392 } else {
1394 }
1395
1396 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
1397 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1398
1399
1400
1401
1402
1403
1404
1405 switch (VA.getLocInfo()) {
1406 default:
1407 llvm_unreachable("Unknown loc info!");
1408 case CCValAssign::Full:
1409 break;
1410 case CCValAssign::BCvt:
1411 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
1412 break;
1413 case CCValAssign::SExt:
1414 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
1415 DAG.getValueType(VA.getValVT()));
1416 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1417 break;
1418 case CCValAssign::ZExt:
1419 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
1420 DAG.getValueType(VA.getValVT()));
1421 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1422 break;
1423 }
1424
1425 InVals.push_back(ArgValue);
1426 } else {
1427
1428 assert(VA.isMemLoc());
1429
1430 EVT LocVT = VA.getLocVT();
1431
1432
1433 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1434 VA.getLocMemOffset(), true);
1435
1436
1437
1438 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL));
1439 InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
1441 }
1442 }
1443
1444
1445
1446 if (isVarArg) {
1447 unsigned StackSize = CCInfo.getStackSize();
1448 AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1449
1450 AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
1451 }
1452
1453 return Chain;
1454}
1455
1456
1457
1458
1459
1460SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1461 SmallVectorImpl<SDValue> &InVals) const {
1462 SelectionDAG &DAG = CLI.DAG;
1463 SDLoc &DL = CLI.DL;
1464 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1465 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1466 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1467 SDValue Chain = CLI.Chain;
1468 SDValue Callee = CLI.Callee;
1469 bool &isTailCall = CLI.IsTailCall;
1470 CallingConv::ID CallConv = CLI.CallConv;
1471 bool isVarArg = CLI.IsVarArg;
1472
1473 MachineFunction &MF = DAG.getMachineFunction();
1474
1475
1476 isTailCall = false;
1477
1478
1479 SmallVector<CCValAssign, 16> ArgLocs;
1480 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1481 *DAG.getContext());
1482
1483
1484
1485
1486 const Function *F = nullptr;
1487 if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1488 const GlobalValue *GV = G->getGlobal();
1489 if (isa<Function>(GV))
1490 F = cast<Function>(GV);
1491 Callee =
1492 DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
1493 } else if (const ExternalSymbolSDNode *ES =
1494 dyn_cast<ExternalSymbolSDNode>(Callee)) {
1495 Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
1496 getPointerTy(DAG.getDataLayout()));
1497 }
1498
1499
1500 if (isVarArg) {
1501 CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
1502 } else {
1503 analyzeArguments(&CLI, F, &DAG.getDataLayout(), Outs, ArgLocs, CCInfo,
1504 Subtarget.hasTinyEncoding());
1505 }
1506
1507
1508 unsigned NumBytes = CCInfo.getStackSize();
1509
1510 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
1511
1512 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1513
1514
1515 unsigned AI, AE;
1516 bool HasStackArgs = false;
1517 for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
1518 CCValAssign &VA = ArgLocs[AI];
1519 EVT RegVT = VA.getLocVT();
1520 SDValue Arg = OutVals[AI];
1521
1522
1523 switch (VA.getLocInfo()) {
1524 default:
1525 llvm_unreachable("Unknown loc info!");
1526 case CCValAssign::Full:
1527 break;
1528 case CCValAssign::SExt:
1529 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
1530 break;
1531 case CCValAssign::ZExt:
1532 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
1533 break;
1534 case CCValAssign::AnyExt:
1535 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
1536 break;
1537 case CCValAssign::BCvt:
1538 Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);
1539 break;
1540 }
1541
1542
1543
1544 if (VA.isMemLoc()) {
1545 HasStackArgs = true;
1546 break;
1547 }
1548
1549
1550
1551 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1552 }
1553
1554
1555
1556
1557
1558
1559
1560 if (HasStackArgs) {
1561 SmallVector<SDValue, 8> MemOpChains;
1562 for (; AI != AE; AI++) {
1563 CCValAssign &VA = ArgLocs[AI];
1564 SDValue Arg = OutVals[AI];
1565
1566 assert(VA.isMemLoc());
1567
1568
1569 SDValue PtrOff = DAG.getNode(
1571 DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
1572 DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));
1573
1574 MemOpChains.push_back(
1575 DAG.getStore(Chain, DL, Arg, PtrOff,
1577 }
1578
1579 if (!MemOpChains.empty())
1580 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1581 }
1582
1583
1584
1585
1586 SDValue InGlue;
1587 for (auto Reg : RegsToPass) {
1588 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InGlue);
1589 InGlue = Chain.getValue(1);
1590 }
1591
1592
1593 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1594 SmallVector<SDValue, 8> Ops;
1595 Ops.push_back(Chain);
1596 Ops.push_back(Callee);
1597
1598
1599
1600 for (auto Reg : RegsToPass) {
1601 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
1602 }
1603
1604
1605
1607
1608
1609 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1610 const uint32_t *Mask =
1611 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
1612 assert(Mask && "Missing call preserved mask for calling convention");
1613 Ops.push_back(DAG.getRegisterMask(Mask));
1614
1615 if (InGlue.getNode()) {
1616 Ops.push_back(InGlue);
1617 }
1618
1619 Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
1620 InGlue = Chain.getValue(1);
1621
1622
1623 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, DL);
1624
1625 if (!Ins.empty()) {
1626 InGlue = Chain.getValue(1);
1627 }
1628
1629
1630
1631 return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, DL, DAG,
1632 InVals);
1633}
1634
1635
1636
1637
1638SDValue AVRTargetLowering::LowerCallResult(
1639 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
1640 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1641 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1642
1643
1644 SmallVector<CCValAssign, 16> RVLocs;
1645 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1646 *DAG.getContext());
1647
1648
1649 if (CallConv == CallingConv::AVR_BUILTIN) {
1650 CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
1651 } else {
1652 analyzeReturnValues(Ins, CCInfo, Subtarget.hasTinyEncoding());
1653 }
1654
1655
1656 for (CCValAssign const &RVLoc : RVLocs) {
1657 Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
1658 InGlue)
1659 .getValue(1);
1660 InGlue = Chain.getValue(2);
1661 InVals.push_back(Chain.getValue(0));
1662 }
1663
1664 return Chain;
1665}
1666
1667
1668
1669
1670
1671bool AVRTargetLowering::CanLowerReturn(
1672 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
1673 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
1674 const Type *RetTy) const {
1675 if (CallConv == CallingConv::AVR_BUILTIN) {
1676 SmallVector<CCValAssign, 16> RVLocs;
1677 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1678 return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
1679 }
1680
1681 unsigned TotalBytes = getTotalArgumentsSizeInBytes(Outs);
1682 return TotalBytes <= (unsigned)(Subtarget.hasTinyEncoding() ? 4 : 8);
1683}
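// Return values larger than 8 bytes (4 bytes on AVRTiny) are reported as not
// lowerable in registers here, which makes the generic code return them
// indirectly through a caller-allocated buffer instead.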
1684
1685SDValue
1686AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1687 bool isVarArg,
1688 const SmallVectorImpl<ISD::OutputArg> &Outs,
1689 const SmallVectorImpl<SDValue> &OutVals,
1690 const SDLoc &dl, SelectionDAG &DAG) const {
1691
1692 SmallVector<CCValAssign, 16> RVLocs;
1693
1694
1695 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1696 *DAG.getContext());
1697
1698 MachineFunction &MF = DAG.getMachineFunction();
1699
1700
1701 if (CallConv == CallingConv::AVR_BUILTIN) {
1702 CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
1703 } else {
1704 analyzeReturnValues(Outs, CCInfo, Subtarget.hasTinyEncoding());
1705 }
1706
1707 SDValue Glue;
1708 SmallVector<SDValue, 4> RetOps(1, Chain);
1709
1710 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1711 CCValAssign &VA = RVLocs[i];
1712 assert(VA.isRegLoc() && "Can only return in registers!");
1713
1714 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
1715
1716
1717 Glue = Chain.getValue(1);
1718 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1719 }
1720
1721
1722
1723 if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {
1724 return Chain;
1725 }
1726
1727 const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1728
1729 if (!AFI->isInterruptOrSignalHandler()) {
1730
1731
1732
1733
1735 }
1736
1737 unsigned RetOpc =
1738 AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_GLUE : AVRISD::RET_GLUE;
1739
1740 RetOps[0] = Chain;
1741
1742 if (Glue.getNode()) {
1743 RetOps.push_back(Glue);
1744 }
1745
1746 return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
1747}
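// The epilogue opcode is chosen from the machine-function info: interrupt and
// signal handlers end in AVRISD::RETI_GLUE (RETI, which also re-enables
// interrupts), every other function ends in AVRISD::RET_GLUE (RET). Naked
// functions return early above and emit no return instruction at all.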
1748
1749
1750
1751
1752
1753MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
1754 MachineBasicBlock *BB,
1755 bool Tiny) const {
1756 unsigned Opc;
1757 const TargetRegisterClass *RC;
1758 bool HasRepeatedOperand = false;
1759 MachineFunction *F = BB->getParent();
1760 MachineRegisterInfo &RI = F->getRegInfo();
1763
1764 switch (MI.getOpcode()) {
1765 default:
1767 case AVR::Lsl8:
1768 Opc = AVR::ADDRdRr;
1769 RC = &AVR::GPR8RegClass;
1770 HasRepeatedOperand = true;
1771 break;
1772 case AVR::Lsl16:
1773 Opc = AVR::LSLWRd;
1774 RC = &AVR::DREGSRegClass;
1775 break;
1776 case AVR::Asr8:
1777 Opc = AVR::ASRRd;
1778 RC = &AVR::GPR8RegClass;
1779 break;
1780 case AVR::Asr16:
1781 Opc = AVR::ASRWRd;
1782 RC = &AVR::DREGSRegClass;
1783 break;
1784 case AVR::Lsr8:
1785 Opc = AVR::LSRRd;
1786 RC = &AVR::GPR8RegClass;
1787 break;
1788 case AVR::Lsr16:
1789 Opc = AVR::LSRWRd;
1790 RC = &AVR::DREGSRegClass;
1791 break;
1792 case AVR::Rol8:
1793 Opc = Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;
1794 RC = &AVR::GPR8RegClass;
1795 break;
1796 case AVR::Rol16:
1797 Opc = AVR::ROLWRd;
1798 RC = &AVR::DREGSRegClass;
1799 break;
1800 case AVR::Ror8:
1801 Opc = AVR::RORBRd;
1802 RC = &AVR::GPR8RegClass;
1803 break;
1804 case AVR::Ror16:
1805 Opc = AVR::RORWRd;
1806 RC = &AVR::DREGSRegClass;
1807 break;
1808 }
1809
1810 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1811
1813 for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)
1814 ;
1816 ++I;
1817
1818
1819 MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
1820 MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
1821 MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);
1822
1826
1827
1828
1830 BB->end());
1831 RemBB->transferSuccessorsAndUpdatePHIs(BB);
1832
1833
1834 BB->addSuccessor(CheckBB);
1835 LoopBB->addSuccessor(CheckBB);
1836 CheckBB->addSuccessor(LoopBB);
1837 CheckBB->addSuccessor(RemBB);
1838
1839 Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
1840 Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
1841 Register ShiftReg = RI.createVirtualRegister(RC);
1842 Register ShiftReg2 = RI.createVirtualRegister(RC);
1843 Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
1844 Register SrcReg = MI.getOperand(1).getReg();
1845 Register DstReg = MI.getOperand(0).getReg();
1846
1847
1848
1850
1851
1852
1853 auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
1854 if (HasRepeatedOperand)
1855 ShiftMI.addReg(ShiftReg);
1856
1857
1858
1859
1860
1861
1862
1863 BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)
1868 BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
1869 .addReg(ShiftAmtSrcReg)
1871 .addReg(ShiftAmtReg2)
1873 BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)
1878
1879 BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);
1881
1882 MI.eraseFromParent();
1883 return RemBB;
1884}
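// insertShift expands the variable-amount pseudo shifts into a small CFG.
// Rough shape of the emitted blocks (a sketch, not literal output):
//
//   CheckBB: PHIs for the current value and remaining count; decrement the
//            count and branch back through LoopBB while any bits remain
//   LoopBB:  one single-bit 8/16-bit shift step, then back to CheckBB
//   RemBB:   receives the final PHI'd value and the rest of the function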
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1903 const DebugLoc &dl = MI.getDebugLoc();
1904
1905 const bool ShiftLeft = Opc == ISD::SHL;
1906 const bool ArithmeticShift = Opc == ISD::SRA;
1907
1908
1909 Register ZeroReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1910 BuildMI(*BB, MI, dl, TII.get(AVR::COPY), ZeroReg)
1912
1913
1914
1915
1916
1917 if (ShiftLeft && (ShiftAmt % 8) >= 6) {
1918
1919
1920
1921
1922 size_t ShiftRegsOffset = ShiftAmt / 8;
1923 size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
1924 MutableArrayRef<std::pair<Register, int>> ShiftRegs =
1925 Regs.slice(ShiftRegsOffset, ShiftRegsSize);
1926
1927
1928
1930
1931
1932
1933 Register LowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1935
1936
1937 if (ShiftAmt % 8 == 6) {
1939 Register NewLowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1940 BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), NewLowByte).addReg(LowByte);
1941 LowByte = NewLowByte;
1942 }
1943
1944
1945 for (size_t I = 0; I < Regs.size(); I++) {
1946 int ShiftRegsIdx = I + 1;
1947 if (ShiftRegsIdx < (int)ShiftRegs.size()) {
1948 Regs[I] = ShiftRegs[ShiftRegsIdx];
1949 } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {
1950 Regs[I] = std::pair(LowByte, 0);
1951 } else {
1952 Regs[I] = std::pair(ZeroReg, 0);
1953 }
1954 }
1955
1956 return;
1957 }
1958
1959
1960 if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
1961
1962
1963 size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
1964 MutableArrayRef<std::pair<Register, int>> ShiftRegs =
1965 Regs.slice(0, ShiftRegsSize);
1966
1967
1969
1970
1971
1972
1973
1974 Register HighByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1976 if (ArithmeticShift) {
1977
1978 BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), HighByte)
1981 ExtByte = HighByte;
1982
1983
1984 } else {
1985
1986 ExtByte = ZeroReg;
1987
1988 BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), HighByte)
1991 }
1992
1993
1994 if (ShiftAmt % 8 == 6) {
1996
1997 Register NewExt = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1998 BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), NewExt)
2001 HighByte = NewExt;
2002 }
2003
2004
2005 for (int I = Regs.size() - 1; I >= 0; I--) {
2006 int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;
2007 if (ShiftRegsIdx >= 0) {
2008 Regs[I] = ShiftRegs[ShiftRegsIdx];
2009 } else if (ShiftRegsIdx == -1) {
2010 Regs[I] = std::pair(HighByte, 0);
2011 } else {
2012 Regs[I] = std::pair(ExtByte, 0);
2013 }
2014 }
2015
2016 return;
2017 }
2018
2019
2020
2021 while (ShiftLeft && ShiftAmt >= 8) {
2022
2023 for (size_t I = 0; I < Regs.size() - 1; I++) {
2025 }
2026
2027
2028 Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
2029
2030
2031 Regs = Regs.drop_back(1);
2032
2033 ShiftAmt -= 8;
2034 }
2035
2036
2038 if (!ShiftLeft && ShiftAmt >= 8) {
2039 if (ArithmeticShift) {
2040
2041 ShrExtendReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2042 Register Tmp = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2043 BuildMI(*BB, MI, dl, TII.get(AVR::ADDRdRr), Tmp)
2044 .addReg(Regs[0].first, 0, Regs[0].second)
2045 .addReg(Regs[0].first, 0, Regs[0].second);
2046 BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), ShrExtendReg)
2049 } else {
2050 ShrExtendReg = ZeroReg;
2051 }
2052 for (; ShiftAmt >= 8; ShiftAmt -= 8) {
2053
2054 for (size_t I = Regs.size() - 1; I != 0; I--) {
2056 }
2057
2058
2059 Regs[0] = std::pair(ShrExtendReg, 0);
2060
2061
2062 Regs = Regs.drop_front(1);
2063 }
2064 }
2065
2066
2067 assert((ShiftAmt < 8) && "Unexpect shift amount");
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087 if (!ArithmeticShift && ShiftAmt >= 4) {
2089 for (size_t I = 0; I < Regs.size(); I++) {
2090 size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;
2091 Register SwapReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2092 BuildMI(*BB, MI, dl, TII.get(AVR::SWAPRd), SwapReg)
2093 .addReg(Regs[Idx].first, 0, Regs[Idx].second);
2094 if (I != 0) {
2095 Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2096 BuildMI(*BB, MI, dl, TII.get(AVR::EORRdRr), R)
2099 Prev = R;
2100 }
2101 Register AndReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2102 BuildMI(*BB, MI, dl, TII.get(AVR::ANDIRdK), AndReg)
2104 .addImm(ShiftLeft ? 0xf0 : 0x0f);
2105 if (I != 0) {
2106 Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2107 BuildMI(*BB, MI, dl, TII.get(AVR::EORRdRr), R)
2110 size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
2111 Regs[PrevIdx] = std::pair(R, 0);
2112 }
2113 Prev = AndReg;
2114 Regs[Idx] = std::pair(AndReg, 0);
2115 }
2116 ShiftAmt -= 4;
2117 }
2118
2119
2120
2121 while (ShiftLeft && ShiftAmt) {
2122
2123 for (ssize_t I = Regs.size() - 1; I >= 0; I--) {
2124 Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2125 Register In = Regs[I].first;
2126 Register InSubreg = Regs[I].second;
2127 if (I == (ssize_t)Regs.size() - 1) {
2128 BuildMI(*BB, MI, dl, TII.get(AVR::ADDRdRr), Out)
2129 .addReg(In, 0, InSubreg)
2130 .addReg(In, 0, InSubreg);
2131 } else {
2132 BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), Out)
2133 .addReg(In, 0, InSubreg)
2134 .addReg(In, 0, InSubreg);
2135 }
2136 Regs[I] = std::pair(Out, 0);
2137 }
2138 ShiftAmt--;
2139 }
2140 while (!ShiftLeft && ShiftAmt) {
2141
2142 for (size_t I = 0; I < Regs.size(); I++) {
2143 Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2144 Register In = Regs[I].first;
2145 Register InSubreg = Regs[I].second;
2146 if (I == 0) {
2147 unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
2149 } else {
2150 BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), Out).addReg(In, 0, InSubreg);
2151 }
2152 Regs[I] = std::pair(Out, 0);
2153 }
2154 ShiftAmt--;
2155 }
2156
2157 if (ShiftAmt != 0) {
2158 llvm_unreachable("don't know how to shift!");
2159 }
2160}
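// Worked example of the nibble trick used above for a logical shift of a
// multi-byte value by 4..7 bits: each byte is SWAPped (exchanging its two
// nibbles) and masked with ANDI 0xf0 or 0x0f, and adjacent bytes are
// recombined with EOR, so four of the single-bit iterations are replaced by
// a constant number of SWAP/ANDI/EOR instructions per byte. The remaining
// 0..3 bits then go through the ordinary ADD/ADC (left) or LSR/ASR/ROR
// (right) chains emitted at the end.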
2161
2162
2163MachineBasicBlock *
2164AVRTargetLowering::insertWideShift(MachineInstr &MI,
2165 MachineBasicBlock *BB) const {
2167 const DebugLoc &dl = MI.getDebugLoc();
2168
2169
2170
2171 int64_t ShiftAmt = MI.getOperand(4).getImm();
2172 ISD::NodeType Opc;
2173 switch (MI.getOpcode()) {
2174 case AVR::Lsl32:
2175 Opc = ISD::SHL;
2176 break;
2177 case AVR::Lsr32:
2178 Opc = ISD::SRL;
2179 break;
2180 case AVR::Asr32:
2181 Opc = ISD::SRA;
2182 break;
2183 }
2184
2185
2186 std::array<std::pair<Register, int>, 4> Registers = {
2187 std::pair(MI.getOperand(3).getReg(), AVR::sub_hi),
2188 std::pair(MI.getOperand(3).getReg(), AVR::sub_lo),
2189 std::pair(MI.getOperand(2).getReg(), AVR::sub_hi),
2190 std::pair(MI.getOperand(2).getReg(), AVR::sub_lo),
2191 };
2192
2193
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2208 (Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
2209
2210 BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2212 .addImm(AVR::sub_lo)
2214 .addImm(AVR::sub_hi);
2215 BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2217 .addImm(AVR::sub_lo)
2219 .addImm(AVR::sub_hi);
2220 } else {
2221
2222 BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2224 .addImm(AVR::sub_hi)
2226 .addImm(AVR::sub_lo);
2227 BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2229 .addImm(AVR::sub_hi)
2231 .addImm(AVR::sub_lo);
2232 }
2233
2234
2235 MI.eraseFromParent();
2236 return BB;
2237}
2238
2240 if (I->getOpcode() == AVR::COPY) {
2241 Register SrcReg = I->getOperand(1).getReg();
2242 return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
2243 }
2244
2245 return false;
2246}
2247
2248
2249
2250
2251MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
2252 MachineBasicBlock *BB) const {
2255 ++I;
2257 ++I;
2259 ++I;
2260 BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
2263 return BB;
2264}
2265
2266
2267MachineBasicBlock *
2268AVRTargetLowering::insertCopyZero(MachineInstr &MI,
2269 MachineBasicBlock *BB) const {
2272 BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::COPY))
2275 MI.eraseFromParent();
2276 return BB;
2277}
2278
2279
2280
2281MachineBasicBlock *AVRTargetLowering::insertAtomicArithmeticOp(
2282 MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, int Width) const {
2283 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297 const TargetRegisterClass *RC =
2298 (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
2299 unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
2300 unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
2301
2302
2306
2307
2308 BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())
2310
2311
2313 BuildMI(*BB, I, dl, TII.get(Opcode), Result)
2314 .addReg(MI.getOperand(0).getReg())
2316
2317
2318 BuildMI(*BB, I, dl, TII.get(StoreOpcode))
2321
2322
2323 BuildMI(*BB, I, dl, TII.get(AVR::OUTARr))
2326
2327
2328 MI.eraseFromParent();
2329 return BB;
2330}
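// Each AtomicLoad* pseudo is expanded in place into a plain load, the
// requested arithmetic operation and a store back through the same pointer.
// Atomicity on AVR comes from running that sequence with interrupts masked:
// the final OUTARr writes the previously captured status register back, so
// the interrupt-enable flag is restored to whatever it was beforehand. The
// 8- and 16-bit forms differ only in the load/store opcodes and register
// class chosen above.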
2331
2332MachineBasicBlock *
2333AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2334 MachineBasicBlock *MBB) const {
2335 int Opc = MI.getOpcode();
2337
2338
2339
2340 switch (Opc) {
2341 case AVR::Lsl8:
2342 case AVR::Lsl16:
2343 case AVR::Lsr8:
2344 case AVR::Lsr16:
2345 case AVR::Rol8:
2346 case AVR::Rol16:
2347 case AVR::Ror8:
2348 case AVR::Ror16:
2349 case AVR::Asr8:
2350 case AVR::Asr16:
2351 return insertShift(MI, MBB, STI.hasTinyEncoding());
2352 case AVR::Lsl32:
2353 case AVR::Lsr32:
2354 case AVR::Asr32:
2355 return insertWideShift(MI, MBB);
2356 case AVR::MULRdRr:
2357 case AVR::MULSRdRr:
2358 return insertMul(MI, MBB);
2359 case AVR::CopyZero:
2360 return insertCopyZero(MI, MBB);
2361 case AVR::AtomicLoadAdd8:
2362 return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);
2363 case AVR::AtomicLoadAdd16:
2364 return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);
2365 case AVR::AtomicLoadSub8:
2366 return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);
2367 case AVR::AtomicLoadSub16:
2368 return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);
2369 case AVR::AtomicLoadAnd8:
2370 return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);
2371 case AVR::AtomicLoadAnd16:
2372 return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);
2373 case AVR::AtomicLoadOr8:
2374 return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);
2375 case AVR::AtomicLoadOr16:
2376 return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);
2377 case AVR::AtomicLoadXor8:
2378 return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);
2379 case AVR::AtomicLoadXor16:
2380 return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);
2381 }
2382
2383 assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
2384 "Unexpected instr type to insert");
2385
2387 ->getParent()
2388 ->getSubtarget()
2389 .getInstrInfo();
2391
2392
2393
2394
2395
2396
2397
2399 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
2401
2402
2403
2404
2405 if (FallThrough != nullptr) {
2407 }
2408
2411
2413 for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I)
2414 ;
2416 ++I;
2419
2420
2421 unsigned CallFrameSize = TII.getCallFrameSizeAt(MI);
2424
2425
2426
2427
2431
2435 MBB->addSuccessor(falseMBB);
2436 MBB->addSuccessor(trueMBB);
2437
2438
2441
2442
2443 BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI),
2444 MI.getOperand(0).getReg())
2445 .addReg(MI.getOperand(1).getReg())
2447 .addReg(MI.getOperand(2).getReg())
2449
2450 MI.eraseFromParent();
2451 return trueMBB;
2452}
2453
2454
2455
2456
2457
2458TargetLowering::ConstraintType
2459AVRTargetLowering::getConstraintType(StringRef Constraint) const {
2460 if (Constraint.size() == 1) {
2461
2462 switch (Constraint[0]) {
2463 default:
2464 break;
2465 case 'a':
2466 case 'b':
2467 case 'd':
2468 case 'l':
2469 case 'e':
2470 case 'q':
2471 case 'r':
2472 case 'w':
2474 case 't':
2475 case 'x':
2476 case 'X':
2477 case 'y':
2478 case 'Y':
2479 case 'z':
2480 case 'Z':
2482 case 'Q':
2484 case 'G':
2485 case 'I':
2486 case 'J':
2487 case 'K':
2488 case 'L':
2489 case 'M':
2490 case 'N':
2491 case 'O':
2492 case 'P':
2493 case 'R':
2495 }
2496 }
2497
2498 return TargetLowering::getConstraintType(Constraint);
2499}
2500
2503
2504
2505 switch (ConstraintCode[0]) {
2506 case 'Q':
2507 return InlineAsm::ConstraintCode::Q;
2508 }
2509 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
2510}
2511
2512TargetLowering::ConstraintWeight
2513AVRTargetLowering::getSingleConstraintMatchWeight(
2514 AsmOperandInfo &info, const char *constraint) const {
2515 ConstraintWeight weight = CW_Invalid;
2516 Value *CallOperandVal = info.CallOperandVal;
2517
2518
2519
2520
2521 if (!CallOperandVal) {
2523 }
2524
2525
2526 switch (*constraint) {
2527 default:
2529 break;
2530 case 'd':
2531 case 'r':
2532 case 'l':
2534 break;
2535 case 'a':
2536 case 'b':
2537 case 'e':
2538 case 'q':
2539 case 't':
2540 case 'w':
2541 case 'x':
2542 case 'X':
2543 case 'y':
2544 case 'Y':
2545 case 'z':
2546 case 'Z':
2548 break;
2549 case 'G':
2550 if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
2551 if (C->isZero()) {
2553 }
2554 }
2555 break;
2556 case 'I':
2557 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2558 if (isUInt<6>(C->getZExtValue())) {
2560 }
2561 }
2562 break;
2563 case 'J':
2564 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2565 if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
2567 }
2568 }
2569 break;
2570 case 'K':
2571 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2572 if (C->getZExtValue() == 2) {
2574 }
2575 }
2576 break;
2577 case 'L':
2578 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2579 if (C->getZExtValue() == 0) {
2581 }
2582 }
2583 break;
2584 case 'M':
2585 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2586 if (isUInt<8>(C->getZExtValue())) {
2588 }
2589 }
2590 break;
2591 case 'N':
2592 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2593 if (C->getSExtValue() == -1) {
2595 }
2596 }
2597 break;
2598 case 'O':
2599 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2600 if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
2601 (C->getZExtValue() == 24)) {
2603 }
2604 }
2605 break;
2606 case 'P':
2607 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2608 if (C->getZExtValue() == 1) {
2610 }
2611 }
2612 break;
2613 case 'R':
2614 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2615 if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
2617 }
2618 }
2619 break;
2620 case 'Q':
2622 break;
2623 }
2624
2625 return weight;
2626}
2627
2628std::pair<unsigned, const TargetRegisterClass *>
2629AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2630 StringRef Constraint,
2631 MVT VT) const {
2632 if (Constraint.size() == 1) {
2633 switch (Constraint[0]) {
2634 case 'a':
2635 if (VT == MVT::i8)
2636 return std::make_pair(0U, &AVR::LD8loRegClass);
2637 else if (VT == MVT::i16)
2638 return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
2639 break;
2640 case 'b':
2641 if (VT == MVT::i8 || VT == MVT::i16)
2642 return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
2643 break;
2644 case 'd':
2645 if (VT == MVT::i8)
2646 return std::make_pair(0U, &AVR::LD8RegClass);
2647 else if (VT == MVT::i16)
2648 return std::make_pair(0U, &AVR::DLDREGSRegClass);
2649 break;
2650 case 'l':
2651 if (VT == MVT::i8)
2652 return std::make_pair(0U, &AVR::GPR8loRegClass);
2653 else if (VT == MVT::i16)
2654 return std::make_pair(0U, &AVR::DREGSloRegClass);
2655 break;
2656 case 'e':
2657 if (VT == MVT::i8 || VT == MVT::i16)
2658 return std::make_pair(0U, &AVR::PTRREGSRegClass);
2659 break;
2660 case 'q':
2661 return std::make_pair(0U, &AVR::GPRSPRegClass);
2662 case 'r':
2663 if (VT == MVT::i8)
2664 return std::make_pair(0U, &AVR::GPR8RegClass);
2665 else if (VT == MVT::i16)
2666 return std::make_pair(0U, &AVR::DREGSRegClass);
2667 break;
2668 case 't':
2669 if (VT == MVT::i8)
2671 &AVR::GPR8RegClass);
2672 break;
2673 case 'w':
2674 if (VT == MVT::i8 || VT == MVT::i16)
2675 return std::make_pair(0U, &AVR::IWREGSRegClass);
2676 break;
2677 case 'x':
2678 case 'X':
2679 if (VT == MVT::i8 || VT == MVT::i16)
2680 return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
2681 break;
2682 case 'y':
2683 case 'Y':
2684 if (VT == MVT::i8 || VT == MVT::i16)
2685 return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
2686 break;
2687 case 'z':
2688 case 'Z':
2689 if (VT == MVT::i8 || VT == MVT::i16)
2690 return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
2691 break;
2692 default:
2693 break;
2694 }
2695 }
2696
2697 return TargetLowering::getRegForInlineAsmConstraint(
2698 Subtarget.getRegisterInfo(), Constraint, VT);
2699}
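// Example (illustrative only, not part of this file) of how these constraint
// letters appear in GCC-style inline assembly; the register classes above are
// what the backend offers the allocator for each letter:
//
//   uint8_t v;
//   uint8_t *p;
//   asm volatile("ld %0, %a1" : "=r"(v) : "e"(p));  // 'e' -> X/Y/Z pair
//
// Letters naming one fixed pair ('x'/'X', 'y'/'Y', 'z'/'Z') pin the operand
// to R27:R26, R29:R28 or R31:R30 respectively, as returned above.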
2700
2701void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2702 StringRef Constraint,
2703 std::vector<SDValue> &Ops,
2704 SelectionDAG &DAG) const {
2705 SDValue Result;
2706 SDLoc DL(Op);
2707 EVT Ty = Op.getValueType();
2708
2709
2710 if (Constraint.size() != 1) {
2711 return;
2712 }
2713
2714 char ConstraintLetter = Constraint[0];
2715 switch (ConstraintLetter) {
2716 default:
2717 break;
2718
2719 case 'I':
2720 case 'J':
2721 case 'K':
2722 case 'L':
2723 case 'M':
2724 case 'N':
2725 case 'O':
2726 case 'P':
2727 case 'R': {
2728 const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2729 if (!C) {
2730 return;
2731 }
2732
2733 int64_t CVal64 = C->getSExtValue();
2734 uint64_t CUVal64 = C->getZExtValue();
2735 switch (ConstraintLetter) {
2736 case 'I':
2737 if (!isUInt<6>(CUVal64))
2738 return;
2740 break;
2741 case 'J':
2742 if (CVal64 < -63 || CVal64 > 0)
2743 return;
2745 break;
2746 case 'K':
2747 if (CUVal64 != 2)
2748 return;
2750 break;
2751 case 'L':
2752 if (CUVal64 != 0)
2753 return;
2755 break;
2756 case 'M':
2757 if (!isUInt<8>(CUVal64))
2758 return;
2759
2760
2761
2763 Ty = MVT::i16;
2764 }
2766 break;
2767 case 'N':
2768 if (CVal64 != -1)
2769 return;
2771 break;
2772 case 'O':
2773 if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
2774 return;
2776 break;
2777 case 'P':
2778 if (CUVal64 != 1)
2779 return;
2781 break;
2782 case 'R':
2783 if (CVal64 < -6 || CVal64 > 5)
2784 return;
2786 break;
2787 }
2788
2789 break;
2790 }
2791 case 'G':
2792 const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op);
2793 if (!FC || !FC->isZero())
2794 return;
2795
2797 break;
2798 }
2799
2800 if (Result.getNode()) {
2801 Ops.push_back(Result);
2802 return;
2803 }
2804
2805 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2806}
2807
2808Register AVRTargetLowering::getRegisterByName(const char *RegName, LLT VT,
2809 const MachineFunction &MF) const {
2810 Register Reg;
2811
2812 if (VT == LLT::scalar(8)) {
2813 Reg = StringSwitch<unsigned>(RegName)
2814 .Case("r0", AVR::R0)
2815 .Case("r1", AVR::R1)
2816 .Default(0);
2817 } else {
2818 Reg = StringSwitch<unsigned>(RegName)
2819 .Case("r0", AVR::R1R0)
2820 .Case("sp", AVR::SP)
2821 .Default(0);
2822 }
2823
2824 if (Reg)
2825 return Reg;
2826
2829}
2830
2831}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
const HexagonInstrInfo * TII
unsigned const TargetRegisterInfo * TRI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Pre allocate WWM Registers
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
Utilities related to the AVR instruction set.
A specific AVR target MCU.
Register getTmpRegister() const
Register getZeroRegister() const
const AVRInstrInfo * getInstrInfo() const override
const AVRRegisterInfo * getRegisterInfo() const override
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
Replace a node with an illegal result type with a new node built out of custom code.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
const AVRSubtarget & Subtarget
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
AVRTargetLowering(const AVRTargetMachine &TM, const AVRSubtarget &STI)
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
A generic AVR implementation.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
LLVM Basic Block Representation.
CCState - This class holds information needed while lowering arguments and return values.
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
LLVMContext & getContext() const
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void addLoc(const CCValAssign &V)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static auto integer_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
BasicBlockListType::iterator iterator
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
MutableArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDValue getValue(unsigned R) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
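A hedged constructor-fragment sketch combining setLibcallName with the setLibcallCallingConv hook documented earlier; the routine name is a hypothetical placeholder, and AVR_BUILTIN is the register-preserving convention described further down this list.

// Sketch: rename one runtime-library routine and give it a special
// calling convention. "__example_divmodhi4" is illustrative only.
setLibcallName(RTLIB::SDIV_I16, "__example_divmodhi4");
setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::AVR_BUILTIN);
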
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ ZeroOrOneBooleanContent
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
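An illustrative fragment using setTruncStoreAction and setLoadExtAction together; the types and actions are examples only.

// Sketch (inside a TargetLowering constructor): mark an extending i8->i16
// load and a truncating i16->i8 store as needing expansion.
setLoadExtAction(ISD::EXTLOAD, MVT::i16, MVT::i8, Expand);
setTruncStoreAction(MVT::i16, MVT::i8, Expand);
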
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
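A minimal sketch, under assumed names, of how a target classifies inline-asm constraint letters via the getConstraintType hook listed above; MyTargetLowering and the constraint letters are hypothetical, and anything unhandled falls back to the base class.

#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace {
// Hypothetical subclass, shown only to illustrate the override's shape.
class MyTargetLowering : public llvm::TargetLowering {
public:
  using llvm::TargetLowering::TargetLowering;

  ConstraintType getConstraintType(llvm::StringRef Constraint) const override {
    if (Constraint.size() == 1) {
      switch (Constraint[0]) {
      case 'r': return C_RegisterClass; // general-purpose register
      case 'I': return C_Immediate;     // small immediate operand
      default:  break;
      }
    }
    return TargetLowering::getConstraintType(Constraint);
  }
};
} // end anonymous namespace
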
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM Value Representation.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
CondCodes
AVR specific condition codes.
@ COND_SH
Unsigned same or higher.
@ COND_GE
Greater than or equal.
@ ASRWN
Word arithmetic shift right N bits.
@ RET_GLUE
Return from subroutine.
@ SWAP
Swap Rd[7:4] <-> Rd[3:0].
@ RETI_GLUE
Return from ISR.
@ LSLW
Wide logical shift left.
@ ROLLOOP
A loop of single left bit rotate instructions.
@ ASRLO
Lower 8-bit of word arithmetic shift right.
@ ASRLOOP
A loop of single arithmetic shift right instructions.
@ LSRLOOP
A loop of single logical shift right instructions.
@ LSR
Logical shift right.
@ LSRLO
Lower 8-bit of word logical shift right.
@ TST
Test for zero or minus instruction.
@ LSRBN
Byte logical shift right N bits.
@ ASRW
Wide arithmetic shift right.
@ SELECT_CC
Operand 0 and operand 1 are the selection variables, operand 2 is the condition code, and operand 3 is the flag operand.
@ CMPC
Compare with carry instruction.
@ LSLWN
Word logical shift left N bits.
@ RORLOOP
A loop of single right bit rotate instructions.
@ CMP
Compare instruction.
@ ASRBN
Byte arithmetic shift right N bits.
@ CALL
Represents an abstract call instruction, which includes a bunch of information.
@ ASR
Arithmetic shift right.
@ LSRW
Wide logical shift right.
@ LSLBN
Byte logical shift left N bits.
@ LSLHI
Higher 8-bit of word logical shift left.
@ LSRWN
Word logical shift right N bits.
@ WRAPPER
A wrapper node for TargetConstantPool, TargetExternalSymbol, and TargetGlobalAddress.
@ LSLLOOP
A loop of single logical shift left instructions.
@ BRCOND
AVR conditional branches.
bool isProgramMemoryAccess(MemSDNode const *N)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ AVR_BUILTIN
Used for special AVR rtlib functions which have an "optimized" convention to preserve registers.
@ C
The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SIGN_EXTEND
Conversion operators.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
@ BasicBlock
Various leaf nodes.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the lhs and rhs (ops #0 and #1) with the condition code in operand #4.
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register to fill the top 24 bits with the sign bit).
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when and if you ever add one.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Undef
Value of the register doesn't matter.
Type
MessagePack types as defined in the standard, with the exception of Integer being divided into a signed and an unsigned variant.
This is an optimization pass for GlobalISel generic memory operations.
static void analyzeReturnValues(const SmallVectorImpl< ArgT > &Args, CCState &CCInfo, bool Tiny)
Analyze the incoming and outgoing values when returning from a function.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
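A small sketch of the BuildMI builder interface documented above, kept target-independent by using the generic COPY opcode; the function name and parameters are illustrative.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
using namespace llvm;

// Insert "Dst = COPY Src" immediately before InsertPt in MBB.
static void emitCopySketch(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertPt,
                           const DebugLoc &DL, const TargetInstrInfo &TII,
                           Register Dst, Register Src) {
  BuildMI(MBB, InsertPt, DL, TII.get(TargetOpcode::COPY), Dst)
      .addReg(Src, RegState::Kill);
}
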
static const MCPhysReg RegList16Tiny[]
static const MCPhysReg RegList8Tiny[]
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI, const Function *F, const DataLayout *TD, const SmallVectorImpl< ArgT > &Args, SmallVectorImpl< CCValAssign > &ArgLocs, CCState &CCInfo, bool Tiny)
Analyze incoming and outgoing function arguments.
static const MCPhysReg RegList16AVR[]
static unsigned getTotalArgumentsSizeInBytes(const SmallVectorImpl< ArgT > &Args)
Count the total number of bytes needed to pass or return these arguments.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
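A tiny usage sketch of the alignTo overload documented above; the numbers are arbitrary.

#include "llvm/Support/Alignment.h"
#include <cstdint>

// Round a byte count up to the next multiple of 2, e.g. 5 -> 6.
static uint64_t roundUpSketch(uint64_t Bytes) {
  return llvm::alignTo(Bytes, llvm::Align(2));
}
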
DWARFExpression::Operation Op
static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC)
IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC.
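A hedged sketch of the kind of mapping intCCToAVRCC performs, restricted to the two AVR condition codes documented earlier in this list (COND_GE, COND_SH); this is not the file's verbatim implementation, and AVRCC::CondCodes comes from the AVR backend's own headers.

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/Support/ErrorHandling.h"

// Sketch only: translate two DAG condition codes into AVR condition codes.
static AVRCC::CondCodes intCCToAVRCCSketch(llvm::ISD::CondCode CC) {
  switch (CC) {
  case llvm::ISD::SETGE:  return AVRCC::COND_GE; // signed greater-or-equal
  case llvm::ISD::SETUGE: return AVRCC::COND_SH; // unsigned same-or-higher
  default: llvm_unreachable("condition code not covered by this sketch");
  }
}
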
static bool isCopyMulResult(MachineBasicBlock::iterator const &I)
static void insertMultibyteShift(MachineInstr &MI, MachineBasicBlock *BB, MutableArrayRef< std::pair< Register, int > > Regs, ISD::NodeType Opc, int64_t ShiftAmt)
static const MCPhysReg RegList8AVR[]
Registers for calling conventions, ordered in reverse as required by ABI.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool isVector() const
Return true if this is a vector value type.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
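A short sketch using the EVT queries listed above in the way lowering code typically does; the 16-bit check is just an example predicate.

#include "llvm/CodeGen/ValueTypes.h"

// Reject vectors first, then test the fixed scalar width in bits.
static bool isScalar16Sketch(llvm::EVT VT) {
  return !VT.isVector() && VT.getSizeInBits().getFixedValue() == 16;
}
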
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
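A small sketch of the MachinePointerInfo factory above; FI stands for a frame index created elsewhere, and the zero offset is illustrative.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"

// Describe a memory access to the given fixed stack slot at offset 0.
static llvm::MachinePointerInfo fixedStackInfoSketch(llvm::MachineFunction &MF,
                                                     int FI) {
  return llvm::MachinePointerInfo::getFixedStack(MF, FI, /*Offset=*/0);
}
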
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.