LLVM: lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp Source File
//===- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling and other late optimizations. This
// pass should be run after register allocation but before the post-regalloc
// scheduling pass.
//
//===----------------------------------------------------------------------===//
// NOTE: the trailing standard-library header names were lost in extraction;
// <cassert>, <cstdint> and <utility> are assumed here.
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"

namespace {

class AArch64ExpandPseudo : public MachineFunctionPass {
public:
  const AArch64InstrInfo *TII;

  static char ID;

  AArch64ExpandPseudo() : MachineFunctionPass(ID) {
    initializeAArch64ExpandPseudoPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return AARCH64_EXPAND_PSEUDO_NAME; }

private:
  // Expansion helper declarations; most were lost in extraction. Surviving
  // parameter fragments, matched to the definitions below:
  //   bool expandMultiVecPseudo(..., unsigned ContiguousOpc, unsigned StridedOpc);
  //   bool expandFormTuplePseudo(..., unsigned Size);
  //   bool expandMOVImm(..., unsigned BitSize);
  //   bool expandCMP_SWAP(..., unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
  //                       unsigned ExtendImm, unsigned ZeroReg, ...);
  //   bool expandSVESpillFill(..., unsigned N);
};

} // end anonymous namespace

char AArch64ExpandPseudo::ID = 0;

INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
                AARCH64_EXPAND_PSEUDO_NAME, false, false)

/// Transfer implicit operands on the pseudo instruction to the
/// instructions created from the expansion.
static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
                           MachineInstrBuilder &DefMI) {
  // Walk the implicit operands (those past the MCInstrDesc's declared
  // operands), attaching uses to UseMI and defs to DefMI. (Function head
  // and loop header reconstructed; they were lost in extraction.)
  const MCInstrDesc &Desc = OldMI.getDesc();
  for (const MachineOperand &MO :
       llvm::drop_begin(OldMI.operands(), Desc.getNumOperands())) {
    assert(MO.isReg() && MO.getReg());
    if (MO.isUse())
      UseMI.add(MO);
    else
      DefMI.add(MO);
  }
}

/// Expand a MOVi32imm or MOVi64imm pseudo into one or more real
/// move-immediate instructions that synthesize the immediate.
bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned BitSize) {
  MachineInstr &MI = *MBBI;
  Register DstReg = MI.getOperand(0).getReg();
  // ... (immediate value and renamable-state extraction elided)

  if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
    // Writing the zero register is a no-op; just delete the pseudo.
    MI.eraseFromParent();
    return true;
  }

  // AArch64_IMM::expandMOVImm chooses a shortest ORR/MOVZ/MOVN/MOVK
  // sequence for the immediate and records it in Insn; each entry is
  // materialized below into MIBS.
  // ... (Insn/MIBS setup elided)
  for (auto I = Insn.begin(), E = Insn.end(); I != E; ++I) {
    bool LastItem = std::next(I) == E;
    switch (I->Opcode) {
    // ... (default case elided)
    case AArch64::ORRWri:
    case AArch64::ORRXri:
      if (I->Op1 == 0) {
        // ORR of the logical immediate, sourced from the zero register.
        // ... (BuildMI call partially elided)
            .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
        // ...
      } else {
        Register DstReg = MI.getOperand(0).getReg();
        bool DstIsDead = MI.getOperand(0).isDead();
        // ... (BuildMI call partially elided)
                RenamableState)
            .addReg(DstReg)
            .addImm(I->Op2));
      }
      break;
    case AArch64::ORRWrs:
    case AArch64::ORRXrs: {
      Register DstReg = MI.getOperand(0).getReg();
      bool DstIsDead = MI.getOperand(0).isDead();
      // ... (BuildMI call partially elided)
              RenamableState)
          .addReg(DstReg)
          .addReg(DstReg)
          .addImm(I->Op2));
    } break;
    case AArch64::ANDXri:
    case AArch64::EORXri:
      if (I->Op1 == 0) {
        // ... (BuildMI call partially elided)
            .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
        // ...
      } else {
        Register DstReg = MI.getOperand(0).getReg();
        bool DstIsDead = MI.getOperand(0).isDead();
        // ... (BuildMI call partially elided)
                RenamableState)
            .addReg(DstReg)
            .addImm(I->Op2));
      }
      break;
    case AArch64::MOVNWi:
    case AArch64::MOVNXi:
    case AArch64::MOVZWi:
    case AArch64::MOVZXi: {
      bool DstIsDead = MI.getOperand(0).isDead();
      // ... (BuildMI call partially elided)
              RenamableState)
          .addImm(I->Op1)
      // ...
    } break;
    case AArch64::MOVKWi:
    case AArch64::MOVKXi: {
      Register DstReg = MI.getOperand(0).getReg();
      bool DstIsDead = MI.getOperand(0).isDead();
      // ... (BuildMI call partially elided)
              RenamableState)
          .addReg(DstReg)
          .addImm(I->Op1)
      // ...
    } break;
    }
  }
  transferImpOps(MI, MIBS.front(), MIBS.back());
  MI.eraseFromParent();
  return true;
}
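
// Illustrative example (not from the source): `MOVi64imm x0,
// 0x123456789abcdef0` typically expands to a MOVZ plus three MOVKs:
//   movz x0, #0xdef0
//   movk x0, #0x9abc, lsl #16
//   movk x0, #0x5678, lsl #32
//   movk x0, #0x1234, lsl #48
// while immediates encodable as a logical immediate use a single ORR.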

bool AArch64ExpandPseudo::expandCMP_SWAP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned LdarOp,
    unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
    MachineBasicBlock::iterator &NextMBBI) {
  // ... (MI/MIMD/DestReg extraction elided)
  Register StatusReg = MI.getOperand(1).getReg();
  bool StatusDead = MI.getOperand(1).isDead();

  // Duplicating undef operands into two instructions does not guarantee
  // that both see the same value, so an undef address is not handled.
  assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
  Register AddrReg = MI.getOperand(2).getReg();
  Register DesiredReg = MI.getOperand(3).getReg();
  Register NewReg = MI.getOperand(4).getReg();

  // Create the load/compare, store and done blocks and insert them after
  // the current block.
  // ... (block creation elided)
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), DoneBB);

  // .Lloadcmp:
  //     mov wStatus, #0
  //     ldaxr xDest, [xAddr]
  //     cmp xDest, xDesired
  //     b.ne .Ldone
  if (!StatusDead)
    BuildMI(LoadCmpBB, MIMD, TII->get(AArch64::MOVZWi), StatusReg)
  // ... (operands and the LdarOp load elided)
  BuildMI(LoadCmpBB, MIMD, TII->get(CmpOp), ZeroReg)
  // ... (operands elided)
  BuildMI(LoadCmpBB, MIMD, TII->get(AArch64::Bcc))
  // ... (operands elided)
  LoadCmpBB->addSuccessor(DoneBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore:
  //     stlxr wStatus, xNew, [xAddr]
  //     cbnz wStatus, .Lloadcmp
  BuildMI(StoreBB, MIMD, TII->get(StlrOp), StatusReg)
  // ... (operands elided)
  BuildMI(StoreBB, MIMD, TII->get(AArch64::CBNZW))
  // ... (operands elided)
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);
  // ...
  MI.eraseFromParent();

  // Recompute live-ins for the new blocks, bottom-up.
  // ...
  StoreBB->clearLiveIns();
  // ...
  LoadCmpBB->clearLiveIns();
  // ...
  return true;
}
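
// Illustrative note (inferred from the opcode choices in expandMI below):
// the 8- and 16-bit variants compare with SUBSWrx plus a UXTB/UXTH
// arithmetic extend, so only the loaded narrow value is compared, e.g.:
//   ldaxrb w8, [x0]
//   cmp    w8, w1, uxtb
//   b.ne   .Ldone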

bool AArch64ExpandPseudo::expandCMP_SWAP_128(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  // ... (MI/MIMD/DestLo/DestHi extraction elided)
  Register StatusReg = MI.getOperand(2).getReg();
  bool StatusDead = MI.getOperand(2).isDead();

  // Duplicating undef operands into two instructions does not guarantee
  // that both see the same value, so an undef address is not handled.
  assert(!MI.getOperand(3).isUndef() && "cannot handle undef");
  Register AddrReg = MI.getOperand(3).getReg();
  Register DesiredLoReg = MI.getOperand(4).getReg();
  Register DesiredHiReg = MI.getOperand(5).getReg();
  Register NewLoReg = MI.getOperand(6).getReg();
  Register NewHiReg = MI.getOperand(7).getReg();

  // Pick acquire/release semantics for the exclusive pair per the ordering.
  unsigned LdxpOp, StxpOp;

  switch (MI.getOpcode()) {
  case AArch64::CMP_SWAP_128_MONOTONIC:
    LdxpOp = AArch64::LDXPX;
    StxpOp = AArch64::STXPX;
    break;
  case AArch64::CMP_SWAP_128_RELEASE:
    LdxpOp = AArch64::LDXPX;
    StxpOp = AArch64::STLXPX;
    break;
  case AArch64::CMP_SWAP_128_ACQUIRE:
    LdxpOp = AArch64::LDAXPX;
    StxpOp = AArch64::STXPX;
    break;
  case AArch64::CMP_SWAP_128:
    LdxpOp = AArch64::LDAXPX;
    StxpOp = AArch64::STLXPX;
    break;
  default:
    llvm_unreachable("Unexpected opcode"); // message reconstructed
  }

  // Create the load/compare, store, fail and done blocks.
  // ... (block creation elided)
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), FailBB);
  MF->insert(++FailBB->getIterator(), DoneBB);

  // .Lloadcmp: load the pair exclusively, compare both halves against the
  // desired value (accumulating into wStatus), and branch to .Lfail on a
  // mismatch.
  BuildMI(LoadCmpBB, MIMD, TII->get(LdxpOp))
  // ... (operands elided)
  BuildMI(LoadCmpBB, MIMD, TII->get(AArch64::SUBSXrs), AArch64::XZR)
  // ...
      .addReg(DesiredLoReg)
  // ...
  BuildMI(LoadCmpBB, MIMD, TII->get(AArch64::CSINCWr), StatusReg)
      .addUse(AArch64::WZR)
      .addUse(AArch64::WZR)
  // ...
  BuildMI(LoadCmpBB, MIMD, TII->get(AArch64::SUBSXrs), AArch64::XZR)
  // ...
      .addReg(DesiredHiReg)
  // ...
  BuildMI(LoadCmpBB, MIMD, TII->get(AArch64::CSINCWr), StatusReg)
  // ... (operands elided)
  BuildMI(LoadCmpBB, MIMD, TII->get(AArch64::CBNZW))
  // ... (operands elided)
  LoadCmpBB->addSuccessor(FailBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore: store the new pair exclusively, retrying on failure.
  BuildMI(StoreBB, MIMD, TII->get(StxpOp), StatusReg)
  // ... (operands elided)
  BuildMI(StoreBB, MIMD, TII->get(AArch64::CBNZW))
  // ... (operands elided)
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  // .Lfail: store back the value that was loaded, so the exclusive access
  // still completes, then retry on failure.
  BuildMI(FailBB, MIMD, TII->get(StxpOp), StatusReg)
  // ... (operands elided)
  BuildMI(FailBB, MIMD, TII->get(AArch64::CBNZW))
  // ... (operands elided)
  FailBB->addSuccessor(LoadCmpBB);
  FailBB->addSuccessor(DoneBB);

  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);
  // ...
  MI.eraseFromParent();

  // Recompute live-ins for the new blocks, bottom-up.
  // ...
  FailBB->clearLiveIns();
  // ...
  StoreBB->clearLiveIns();
  // ...
  LoadCmpBB->clearLiveIns();
  // ...
  return true;
}
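
// Illustrative shape of the fully-ordered CMP_SWAP_128 expansion (assumed):
// .Lloadcmp:
//   ldaxp xLo, xHi, [xAddr]
//   subs  xzr, xLo, xDesiredLo       ; compare low halves
//   csinc wStatus, wzr, wzr, <cc>
//   subs  xzr, xHi, xDesiredHi       ; compare high halves
//   csinc wStatus, wStatus, wzr, <cc>
//   cbnz  wStatus, .Lfail
// .Lstore:
//   stlxp wStatus, xNewLo, xNewHi, [xAddr]
//   cbnz  wStatus, .Lloadcmp
// .Lfail:
//   stlxp wStatus, xLo, xHi, [xAddr] ; write back the observed value
//   cbnz  wStatus, .Lloadcmp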

// ... (an extended comment was lost in extraction here; it appears to
// describe the convention for expanding SVE destructive pseudo
// instructions: when a MOVPRFX must be emitted, when a reversed (_REV)
// instruction form is chosen, and how zeroing false-lane predication is
// handled.)

bool AArch64ExpandPseudo::expand_DestructiveOp(
    MachineInstr &MI, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI) {
  // Map the pseudo to its real SVE instruction and read its destructive
  // type and false-lanes behavior from the TSFlags. (Reconstructed; these
  // lines were lost in extraction.)
  unsigned Opcode = AArch64::getSVEPseudoMap(MI.getOpcode());
  uint64_t DType = TII->get(Opcode).TSFlags & AArch64::DestructiveInstTypeMask;
  uint64_t FalseLanes = MI.getDesc().TSFlags & AArch64::FalseLanesMask;
  bool FalseZero = FalseLanes == AArch64::FalseLanesZero;
  Register DstReg = MI.getOperand(0).getReg();
  bool DstIsDead = MI.getOperand(0).isDead();
  bool UseRev = false;
  unsigned PredIdx, DOPIdx, SrcIdx, Src2Idx;

  switch (DType) {
  // ... (binary commutative-with-reverse case labels elided)
    if (DstReg == MI.getOperand(3).getReg()) {
      // The destination matches the second source; use the reversed form.
      std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 3, 2);
      UseRev = true;
      break;
    }
    [[fallthrough]];
  // ... (plain binary case labels elided)
    std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 2, 3);
    break;
  // ... (unary passthru case label elided)
    std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(2, 3, 3);
    break;
  // ... (ternary commutative-with-reverse case label elided)
    std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 2, 3, 4);
    if (DstReg == MI.getOperand(3).getReg()) {
      // The destination matches the first multiplicand; use the reverse.
      std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 3, 4, 2);
      UseRev = true;
    } else if (DstReg == MI.getOperand(4).getReg()) {
      // The destination matches the second multiplicand; use the reverse.
      std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 4, 3, 2);
      UseRev = true;
    }
    break;
  default:
    llvm_unreachable("Unsupported Destructive Operand type");
  }

  // Determine whether the destructive operand register is distinct from
  // the other operands; only then can a MOVPRFX-free expansion be safe
  // when one is otherwise required.
  bool DOPRegIsUnique = false;
  switch (DType) {
  // ... (case labels elided)
    DOPRegIsUnique = DstReg != MI.getOperand(SrcIdx).getReg();
    break;
  // ... (case labels elided)
    DOPRegIsUnique =
        DstReg != MI.getOperand(DOPIdx).getReg() ||
        MI.getOperand(DOPIdx).getReg() != MI.getOperand(SrcIdx).getReg();
    break;
  // ... (case labels elided)
    DOPRegIsUnique = true;
    break;
  // ... (case labels elided)
    DOPRegIsUnique =
        DstReg != MI.getOperand(DOPIdx).getReg() ||
        (MI.getOperand(DOPIdx).getReg() != MI.getOperand(SrcIdx).getReg() &&
         MI.getOperand(DOPIdx).getReg() != MI.getOperand(Src2Idx).getReg());
    break;
  }

  // If a reversed form is needed, remap the opcode through the SVE
  // rev/non-rev instruction tables.
  if (UseRev) {
    int NewOpcode;
    if ((NewOpcode = AArch64::getSVERevInstr(Opcode)) != -1)
      Opcode = NewOpcode;
    else if ((NewOpcode = AArch64::getSVENonRevInstr(Opcode)) != -1)
      Opcode = NewOpcode;
  }

  // Pick the MOVPRFX and zeroing-LSL opcodes for the element size.
  uint64_t ElementSize = TII->getElementSizeForOpcode(Opcode);
  unsigned MovPrfx, LSLZero, MovPrfxZero;
  switch (ElementSize) {
  // ... (byte element-size case labels elided)
    MovPrfx = AArch64::MOVPRFX_ZZ;
    LSLZero = AArch64::LSL_ZPmI_B;
    MovPrfxZero = AArch64::MOVPRFX_ZPzZ_B;
    break;
  // ... (halfword)
    MovPrfx = AArch64::MOVPRFX_ZZ;
    LSLZero = AArch64::LSL_ZPmI_H;
    MovPrfxZero = AArch64::MOVPRFX_ZPzZ_H;
    break;
  // ... (word)
    MovPrfx = AArch64::MOVPRFX_ZZ;
    LSLZero = AArch64::LSL_ZPmI_S;
    MovPrfxZero = AArch64::MOVPRFX_ZPzZ_S;
    break;
  // ... (doubleword)
    MovPrfx = AArch64::MOVPRFX_ZZ;
    LSLZero = AArch64::LSL_ZPmI_D;
    MovPrfxZero = AArch64::MOVPRFX_ZPzZ_D;
    break;
  default:
    llvm_unreachable("Unsupported ElementSize");
  }

  // Create a zeroing or merging MOVPRFX before the destructive operation
  // when one is required.
  if (FalseZero) {
    // A zeroing MOVPRFX requires a unique destructive operand and a
    // predicated instruction.
    // ... (assert heads elided)
           "The destructive operand should be unique");
    // ...
           "This instruction is unpredicated");

    // Merge source operand into destination register: MOVPRFX Zd, Pg/z, Zs
    // ... (PRFX BuildMI head elided, using MovPrfxZero)
        .addReg(MI.getOperand(PredIdx).getReg())
        .addReg(MI.getOperand(DOPIdx).getReg());

    // After the prefix, the destructive operand is the destination itself.
    DOPIdx = 0;

    // Restore the false lanes to zero afterwards if the prefix alone could
    // not guarantee it (an LSL #0 under the predicate zeroes them).
    // ... (condition head elided, ending with:)
        !DOPRegIsUnique) {
      // ... (LSLZero build elided)
          .add(MI.getOperand(PredIdx))
      // ... (remaining operands elided)
    }
  } else if (DstReg != MI.getOperand(DOPIdx).getReg()) {
    assert(DOPRegIsUnique && "The destructive operand should be unique");
    // ... (unpredicated MOVPRFX build elided)
        .addReg(MI.getOperand(DOPIdx).getReg());
    DOPIdx = 0;
  }

  // Create the destructive operation.
  // ... (MachineInstrBuilder DOP creation elided)

  switch (DType) {
  // ... (unary passthru case label elided)
        .add(MI.getOperand(PredIdx))
        .add(MI.getOperand(SrcIdx));
    break;
  // ... (binary case labels elided)
    DOP.add(MI.getOperand(PredIdx))
    // ...
        .add(MI.getOperand(SrcIdx));
    break;
  // ... (ternary case label elided)
    DOP.add(MI.getOperand(PredIdx))
    // ...
        .add(MI.getOperand(SrcIdx))
        .add(MI.getOperand(Src2Idx));
    break;
  }

  if (PRFX) {
    // ... (bundle finalization of the prefixed pair elided)
    transferImpOps(MI, PRFX, DOP);
  } else
    transferImpOps(MI, DOP, DOP);

  MI.eraseFromParent();
  return true;
}
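
// Illustrative example (assumed): a destructive SVE pseudo whose
// destination differs from its destructive operand, e.g.
//   z0 = FADD_PSEUDO p0/m, z1, z2
// is expanded with a move-prefix so the real instruction can be destructive:
//   movprfx z0, z1
//   fadd    z0.s, p0/m, z0.s, z2.s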

bool AArch64ExpandPseudo::expandSetTagLoop(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  // ... (MI/DL extraction elided)
  Register SizeReg = MI.getOperand(0).getReg();
  Register AddressReg = MI.getOperand(1).getReg();

  // ...

  bool ZeroData = MI.getOpcode() == AArch64::STZGloop_wback;
  const unsigned OpCode1 =
      ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex;
  const unsigned OpCode2 =
      ZeroData ? AArch64::STZ2GPostIndex : AArch64::ST2GPostIndex;

  unsigned Size = MI.getOperand(2).getImm();
  // ... (assert elided)
  if (Size % (16 * 2) != 0) {
    // Peel off one 16-byte tag store when Size is not a multiple of 32.
    // ... (single STG/STZG emission elided)
  }
  // ... (size-register setup elided)

  // Create the loop and exit blocks.
  // ... (block creation elided)
  MF->insert(++LoopBB->getIterator(), DoneBB);

  // The loop stores two granules per iteration with post-increment, then
  // decrements the remaining size and branches back while non-zero.
  // ... (ST2G/STZ2G emission elided)
  BuildMI(LoopBB, DL, TII->get(AArch64::SUBSXri))
  // ... (operands and the conditional branch elided)

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(DoneBB);

  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);
  // ...
  MI.eraseFromParent();

  // Recompute live-ins for the new blocks.
  // ...
  LoopBB->clearLiveIns();
  // ...
  DoneBB->clearLiveIns();
  // ...
  return true;
}
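
// Illustrative loop shape (assumed), for a Size that is a multiple of 32:
// .Lloop:
//   st2g x1, [x1], #32     ; stz2g for the zeroing variant
//   subs x0, x0, #32
//   b.ne .Lloop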

bool AArch64ExpandPseudo::expandSVESpillFill(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MBBI,
                                             unsigned Opc, unsigned N) {
  assert((Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI ||
          Opc == AArch64::LDR_PXI || Opc == AArch64::STR_PXI) &&
         "Unexpected opcode");
  unsigned RState = (Opc == AArch64::LDR_ZXI || Opc == AArch64::LDR_PXI)
                        ? RegState::Define
                        : 0;
  unsigned sub0 = (Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI)
                      ? AArch64::zsub0
                      : AArch64::psub0;
  // ... (MI extraction elided)
  // Emit one load/store per subregister of the tuple, at consecutive
  // immediate offsets. (Loop header reconstructed.)
  for (unsigned Offset = 0; Offset < N; ++Offset) {
    int ImmOffset = MI.getOperand(2).getImm() + Offset;
    bool Kill = (Offset + 1 == N) ? MI.getOperand(1).isKill() : false;
    assert(ImmOffset >= -256 && ImmOffset < 256 &&
           "Immediate spill offset out of range");
    // ... (BuildMI head elided)
        .addReg(TRI->getSubReg(MI.getOperand(0).getReg(), sub0 + Offset),
                RState)
    // ... (base register and offset operands elided)
  }
  MI.eraseFromParent();
  return true;
}
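
// For example (assumed): `STR_ZZXI z0_z1, [x0, #0]` becomes two stores of
// the tuple's subregisters at consecutive vector-length-scaled slots:
//   str z0, [x0, #0, mul vl]
//   str z1, [x0, #1, mul vl]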

// Create a call with the given Opcode, copying over any register arguments
// (added as implicit operands during ISel) up to the register mask.
static MachineInstr *createCallWithOps(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       const AArch64InstrInfo *TII,
                                       unsigned Opcode,
                                       ArrayRef<MachineOperand> ExplicitOps,
                                       unsigned RegMaskStartIdx) {
  // Build the MI, with explicit operands first (including the call target).
  MachineInstr *Call =
      BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(Opcode))
          .add(ExplicitOps)
          .getInstr();

  // Register arguments cannot be explicit operands of the branch, which
  // expects only the target; re-add them as implicit uses. (Loop condition
  // and operand fetch reconstructed; parts were eaten in extraction.)
  while (!MBBI->getOperand(RegMaskStartIdx).isRegMask()) {
    auto MOP = MBBI->getOperand(RegMaskStartIdx);
    assert(MOP.isReg() && "can only add register operands");
    Call->addOperand(MachineOperand::CreateReg(
        MOP.getReg(), /*Def=*/false, /*Implicit=*/true, /*isKill=*/false,
        /*isDead=*/false, MOP.isUndef()));
    RegMaskStartIdx++;
  }
  // Copy the remaining operands (register mask onwards) verbatim.
  // ... (loop head elided)
    Call->addOperand(MO);

  return Call;
}

// Create a regular BL/BLR call to CallTarget, forwarding register
// arguments as implicit operands.
static MachineInstr *createCall(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const AArch64InstrInfo *TII,
                                MachineOperand &CallTarget,
                                unsigned RegMaskStartIdx) {
  unsigned Opc = CallTarget.isGlobal() ? AArch64::BL : AArch64::BLR;

  assert((CallTarget.isGlobal() || CallTarget.isReg()) &&
         "invalid operand for regular call");
  return createCallWithOps(MBB, MBBI, TII, Opc, CallTarget, RegMaskStartIdx);
}
bool AArch64ExpandPseudo::expandCALL_RVMARKER(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) {
  // Expand CALL_RVMARKER pseudo to:
  // - a branch to the call target,
  // - the special `mov x29, x29` marker, and
  // - a call to the attached runtime function (RVTarget).
  MachineInstr &MI = *MBBI;
  MachineOperand &RVTarget = MI.getOperand(0);
  assert(RVTarget.isGlobal() && "invalid operand for attached call");

  // ...

  if (MI.getOpcode() == AArch64::BLRA_RVMARKER) {
    // Authenticated variant.
    // ... (key/discriminator operand extraction and an assert with the
    //      message "Invalid auth call key" elided; the call is created via
    //      createCallWithOps with the register mask at operand index 5)
  } else {
    assert(MI.getOpcode() == AArch64::BLR_RVMARKER && "unknown rvmarker MI");
    // ... (call created via createCall with the register mask at operand
    //      index 2)
  }

  // Emit the marker `mov x29, x29` (an ORRXrs of FP with XZR).
  // ... (BuildMI head elided)
      .addReg(AArch64::XZR)
  // ... (remaining operands elided)

  auto *RVCall = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::BL))
                     .add(RVTarget)
  // ... (.getInstr() elided)

  if (MI.shouldUpdateAdditionalCallInfo())
    // ... (moveAdditionalCallInfo from MI to the new call elided)

  MI.eraseFromParent();
  // Bundle the sequence so nothing is scheduled between the call, the
  // marker and the runtime call.
  // ... (finalizeBundle head elided, range ending at:)
               std::next(RVCall->getIterator()));
  return true;
}
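
// Illustrative result (assumed): for a BLR_RVMARKER whose attached runtime
// function is objc_retainAutoreleasedReturnValue, the emitted bundle is:
//   blr x<fn>
//   mov x29, x29                              ; rv-marker
//   bl  objc_retainAutoreleasedReturnValue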

bool AArch64ExpandPseudo::expandCALL_BTI(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MBBI) {
  // Expand CALL_BTI pseudo to a call immediately followed by a BTI
  // instruction, bundled together so nothing can be scheduled in between.
  // ... (MI extraction elided; the call is created via createCall with the
  //      register mask at operand index 1)

  // ... (HINT (BTI) emission elided)

  if (MI.shouldUpdateAdditionalCallInfo())
    // ... (moveAdditionalCallInfo elided)

  MI.eraseFromParent();
  // ... (finalizeBundle elided)
  return true;
}

bool AArch64ExpandPseudo::expandStoreSwiftAsyncContext(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) {
  Register CtxReg = MBBI->getOperand(0).getReg();
  Register BaseReg = MBBI->getOperand(1).getReg();
  int Offset = MBBI->getOperand(2).getImm();
  // ... (DL/STI extraction elided)

  if (STI.getTargetTriple().getArchName() != "arm64e") {
    // No pointer authentication: store the context directly.
    // ... (STRXui emission and pseudo erase elided)
    return true;
  }

  // On arm64e, sign the context pointer (PACDB) with the storage address,
  // blended with a fixed discriminator, before storing it:
  //     add/sub x16, xBase, #Offset
  //     movk x16, #<disc>, lsl #48
  //     mov x17, xCtx
  //     pacdb x17, x16
  //     str x17, [xBase, #Offset]
  unsigned Opc = Offset >= 0 ? AArch64::ADDXri : AArch64::SUBXri;
  // ... (address computation into X16 elided)
  // ... (MOVK blending the discriminator elided; it reads and writes X16:)
      .addUse(AArch64::X16)
  // ...

  // Move the context value into X17 (an ORR with XZR).
  // ... (build elided)
      .addUse(AArch64::XZR)
  // ... (PACDB build elided:)
      .addUse(AArch64::X17)
      .addUse(AArch64::X16)
  // ... (STRXui build elided:)
      .addUse(AArch64::X17)
  // ...
  // ... (pseudo erase elided)
  return true;
}
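
// Illustrative arm64e sequence (assumed shape):
//   add   x16, x29, #Offset       ; address of the context slot
//   movk  x16, #<disc>, lsl #48   ; blend in the fixed discriminator
//   mov   x17, x0                 ; context value
//   pacdb x17, x16                ; sign with the B data key
//   str   x17, [x29, #Offset]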

MachineBasicBlock *
AArch64ExpandPseudo::expandRestoreZA(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) {
  // ... (MI/DL/MF extraction elided)
  assert((std::next(MBBI) != MBB.end() ||
          MI.getParent()->successors().begin() !=
              MI.getParent()->successors().end()) &&
         "Unexpected unreachable in block that restores ZA");

  // Compare the saved TPIDR2_EL0 value against zero; the ZA restore call
  // is only made when a lazy save is pending.
  // ... (CBZX emission elided)

  // Split the block at the pseudo; the restore call lives on the
  // conditional path.
  // ... (EndBB computation elided; the non-terminator arm is
  //      SMBB->splitAt(MI, true))

  // ... (successor wiring and branch emission elided)

  // Replace the pseudo with a call to the restore routine, forwarding the
  // remaining operands.
  // ... (BuildMI head elided)
  for (unsigned I = 2; I < MI.getNumOperands(); ++I)
    MIB.add(MI.getOperand(I));
  // ...

  MI.eraseFromParent();
  return EndBB;
}

MachineBasicBlock *
AArch64ExpandPseudo::expandCondSMToggle(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) {
  MachineInstr &MI = *MBBI;
  // In the case of a smstart/smstop before an unreachable, just remove the
  // pseudo; there is nothing to branch to.
  if (std::next(MBBI) == MBB.end() &&
      MI.getParent()->successors().begin() ==
          MI.getParent()->successors().end()) {
    MI.eraseFromParent();
    return &MBB;
  }

  // ... (a long comment was lost in extraction here; it appears to walk
  // through how a conditional streaming-mode change (MSRpstatePseudo) is
  // expanded: the caller's streaming mode, recorded in PStateSM, is tested
  // with TBZ/TBNZ and the SMSTART/SMSTOP of SVCR.SM executes only when the
  // mode actually has to change.)

  unsigned Opc;
  switch (MI.getOperand(2).getImm()) {
  // ... (case label elided)
    llvm_unreachable("Should have matched to instruction directly");
  // ... (case label elided: toggle when the caller is streaming)
    Opc = AArch64::TBNZW;
    break;
  // ... (case label elided: toggle when the caller is non-streaming)
    Opc = AArch64::TBZW;
    break;
  }
  auto PStateSM = MI.getOperand(3).getReg();
  // ... (TRI extraction elided)
  unsigned SMReg32 = TRI->getSubReg(PStateSM, AArch64::sub_32);
  // ...

  // Test bit 0 of the caller's streaming-mode value, branching over the
  // toggle when no change is needed, and split the block at the pseudo.
  // ... (TB(N)Z emission elided)
  // ... (EndBB computation elided; the non-terminator arm is
  //      SMBB->splitAt(MI, true))

  // ... (successor wiring elided)

  // Replace the pseudo with the real MSR that writes SVCR.SM.
  // ... (BuildMI head elided, using
  //      TII->get(AArch64::MSRpstatesvcrImm1))

  // Forward the pseudo's pstate-field and immediate operands, then any
  // remaining operands.
  MIB.add(MI.getOperand(0));
  MIB.add(MI.getOperand(1));
  for (unsigned i = 4; i < MI.getNumOperands(); ++i)
    MIB.add(MI.getOperand(i));
  // ...

  MI.eraseFromParent();
  return EndBB;
}
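
// Illustrative expansion (assumed shape) of a conditional SMSTART/SMSTOP:
//   tbnz w(PStateSM), #0, .Lcont   ; or tbz, depending on the condition
//   msr  SVCR.SM, #0/#1            ; MSRpstatesvcrImm1
// .Lcont: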

bool AArch64ExpandPseudo::expandMultiVecPseudo(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    TargetRegisterClass ContiguousClass, TargetRegisterClass StridedClass,
    unsigned ContiguousOp, unsigned StridedOpc) {
  MachineInstr &MI = *MBBI;
  Register Tuple = MI.getOperand(0).getReg();

  // Pick the contiguous or strided real opcode depending on which register
  // class the allocated tuple register landed in. (If-conditions
  // reconstructed; they were lost in extraction.)
  auto ContiguousRange = ContiguousClass.getRegisters();
  auto StridedRange = StridedClass.getRegisters();
  unsigned Opc;
  if (llvm::is_contained(ContiguousRange, Tuple.asMCReg())) {
    Opc = ContiguousOp;
  } else if (llvm::is_contained(StridedRange, Tuple.asMCReg())) {
    Opc = StridedOpc;
  } else
    llvm_unreachable("Cannot expand Multi-Vector pseudo");

  // ... (BuildMI re-emitting the instruction with opcode Opc elided)
  transferImpOps(MI, MIB, MIB);
  MI.eraseFromParent();
  return true;
}

bool AArch64ExpandPseudo::expandFormTuplePseudo(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI, unsigned Size) {
  assert((Size == 2 || Size == 4) && "Invalid Tuple Size");
  // ... (MI extraction elided)
  Register ReturnTuple = MI.getOperand(0).getReg();

  // Copy each input into the matching subregister of the result tuple,
  // unless it is already there.
  for (unsigned I = 0; I < Size; ++I) {
    Register FormTupleOpReg = MI.getOperand(I + 1).getReg();
    Register ReturnTupleSubReg =
        TRI->getSubReg(ReturnTuple, AArch64::zsub0 + I);
    if (FormTupleOpReg != ReturnTupleSubReg)
      // ... (BuildMI head emitting an ORR_ZZZ register move elided)
          .addReg(FormTupleOpReg)
          .addReg(FormTupleOpReg);
  }

  MI.eraseFromParent();
  return true;
}
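
// Illustrative example (assumed): FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO
// returning z0_z1 from inputs z4, z5 emits two SVE register moves:
//   mov z0.d, z4.d    ; encoded as orr z0.d, z4.d, z4.d
//   mov z1.d, z5.d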

/// If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  unsigned Opcode = MI.getOpcode();

  // Check if we can expand the destructive op
  int OrigInstr = AArch64::getSVEPseudoMap(MI.getOpcode());
  if (OrigInstr != -1) {
    auto &Orig = TII->get(OrigInstr);
    // ... (TSFlags destructive-type check elided)
      return expand_DestructiveOp(MI, MBB, MBBI);
    }
  }

  switch (Opcode) {
  default:
    break;

  case AArch64::BSPv8i8:
  case AArch64::BSPv16i8: {
    Register DstReg = MI.getOperand(0).getReg();
    if (DstReg == MI.getOperand(3).getReg()) {
      // Expand to BIT: the destination already holds the second operand.
      // ... (BuildMI head elided:)
              TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BITv8i8
                                                  : AArch64::BITv16i8))
      // ... (operands elided)
    } else if (DstReg == MI.getOperand(2).getReg()) {
      // Expand to BIF: the destination already holds the first operand.
      // ... (BuildMI head elided:)
              TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BIFv8i8
                                                  : AArch64::BIFv16i8))
      // ... (operands elided)
    } else {
      // Expand to BSL, possibly after moving the mask into the destination.
      if (DstReg == MI.getOperand(1).getReg()) {
        // ... (BuildMI head elided:)
                TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
                                                    : AArch64::BSLv16i8))
        // ... (operands elided)
      } else {
        // ... (ORR copy of the mask into DstReg elided:)
                TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::ORRv8i8
                                                    : AArch64::ORRv16i8))
        // ... (operands elided, followed by the BSL itself:)
                TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
                                                    : AArch64::BSLv16i8))
        // ... (operands elided)
      }
    }
    MI.eraseFromParent();
    return true;
  }

  case AArch64::ADDWrr:
  case AArch64::SUBWrr:
  case AArch64::ADDXrr:
  case AArch64::SUBXrr:
  case AArch64::ADDSWrr:
  case AArch64::SUBSWrr:
  case AArch64::ADDSXrr:
  case AArch64::SUBSXrr:
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::ANDSWrr:
  case AArch64::ANDSXrr:
  case AArch64::BICSWrr:
  case AArch64::BICSXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr: {
    unsigned Opcode;
    switch (MI.getOpcode()) {
    default:
      return false;
    case AArch64::ADDWrr: Opcode = AArch64::ADDWrs; break;
    case AArch64::SUBWrr: Opcode = AArch64::SUBWrs; break;
    case AArch64::ADDXrr: Opcode = AArch64::ADDXrs; break;
    case AArch64::SUBXrr: Opcode = AArch64::SUBXrs; break;
    case AArch64::ADDSWrr: Opcode = AArch64::ADDSWrs; break;
    case AArch64::SUBSWrr: Opcode = AArch64::SUBSWrs; break;
    case AArch64::ADDSXrr: Opcode = AArch64::ADDSXrs; break;
    case AArch64::SUBSXrr: Opcode = AArch64::SUBSXrs; break;
    case AArch64::ANDWrr: Opcode = AArch64::ANDWrs; break;
    case AArch64::ANDXrr: Opcode = AArch64::ANDXrs; break;
    case AArch64::BICWrr: Opcode = AArch64::BICWrs; break;
    case AArch64::BICXrr: Opcode = AArch64::BICXrs; break;
    case AArch64::ANDSWrr: Opcode = AArch64::ANDSWrs; break;
    case AArch64::ANDSXrr: Opcode = AArch64::ANDSXrs; break;
    case AArch64::BICSWrr: Opcode = AArch64::BICSWrs; break;
    case AArch64::BICSXrr: Opcode = AArch64::BICSXrs; break;
    case AArch64::EONWrr: Opcode = AArch64::EONWrs; break;
    case AArch64::EONXrr: Opcode = AArch64::EONXrs; break;
    case AArch64::EORWrr: Opcode = AArch64::EORWrs; break;
    case AArch64::EORXrr: Opcode = AArch64::EORXrs; break;
    case AArch64::ORNWrr: Opcode = AArch64::ORNWrs; break;
    case AArch64::ORNXrr: Opcode = AArch64::ORNXrs; break;
    case AArch64::ORRWrr: Opcode = AArch64::ORRWrs; break;
    case AArch64::ORRXrr: Opcode = AArch64::ORRXrs; break;
    }
    MachineFunction &MF = *MBB.getParent();
    // Create the new instruction without implicit operands added, then
    // insert it before the pseudo. (Partially reconstructed.)
    MachineInstr *NewMI = MF.CreateMachineInstr(
        TII->get(Opcode), MI.getDebugLoc(), /*NoImplicit=*/true);
    // ... (insertion of NewMI and MIB1 construction elided)
    MIB1->setPCSections(MF, MI.getPCSections());
    // ... (destination operand elided)
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    transferImpOps(MI, MIB1, MIB1);
    if (auto DebugNumber = MI.peekDebugInstrNum())
      NewMI->setDebugInstrNum(DebugNumber);
    MI.eraseFromParent();
    return true;
  }
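
  // Illustrative example (assumed): these register-register aliases are
  // expanded to the canonical shifted-register encodings with a zero
  // shift, e.g. `ADDWrr w0, w1, w2` becomes `ADDWrs w0, w1, w2, lsl #0`.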

  case AArch64::LOADgot: {
    // ... (MF extraction elided)
    Register DstReg = MI.getOperand(0).getReg();
    const MachineOperand &MO1 = MI.getOperand(1);
    // ... (target-flags extraction elided)

    if (MF->getTarget().getCodeModel() == CodeModel::Tiny) {
      // Tiny code model: a single literal load from the GOT.
      // ... (BuildMI head elided, using
      //      TII->get(AArch64::LDRXl), DstReg)
      // ... (inner if (MO1.isGlobal()) operand emission elided)
      } else if (MO1.isSymbol()) {
      // ... (external-symbol operand emission elided)
      } else {
        assert(MO1.isCPI() &&
               "Only expect globals, externalsymbols, or constant pools");
      // ... (constant-pool operand emission elided)
      }
      // ... (transferImpOps, erase and return elided)
    } else {
      // Small code model: ADRP of the GOT page plus a load of the slot.
      // ... (ADRP (MIB1) emission elided)

      if (MF->getSubtarget<AArch64Subtarget>().isTargetILP32()) {
        // ILP32: the load writes the 32-bit sub-register.
        unsigned Reg32 = TRI->getSubReg(DstReg, AArch64::sub_32);
        unsigned DstFlags = MI.getOperand(0).getTargetFlags();
        // ... (LDRWui (MIB2) build elided)
      } else {
        Register DstReg = MI.getOperand(0).getReg();
        // ... (LDRXui (MIB2) build elided)
      }

      // ... (inner if (MO1.isGlobal()) pageoff operand emission elided)
      } else if (MO1.isSymbol()) {
      // ... (external-symbol pageoff emission elided)
      } else {
        assert(MO1.isCPI() &&
               "Only expect globals, externalsymbols, or constant pools");
      // ... (constant-pool pageoff emission elided)
      }

      transferImpOps(MI, MIB1, MIB2);
    }
    MI.eraseFromParent();
    return true;
  }
  case AArch64::MOVaddrBA: {
    // ... (MF extraction and Mach-O check elided; on Mach-O, blockaddress
    // symbols are lowered via a constant-pool entry loaded with
    // ADRP + LDRXui)
      const BlockAddress *BA = MI.getOperand(1).getBlockAddress();
      assert(MI.getOperand(1).getOffset() == 0 && "unexpected offset");
      // ... (constant-pool index creation elided)

      Register DstReg = MI.getOperand(0).getReg();
      auto MIB1 =
      // ... (ADRP of the constant-pool page elided)
      // ... (LDRXui (MIB2) head:)
                          TII->get(AArch64::LDRXui), DstReg)
      // ... (page-offset operands elided)
      transferImpOps(MI, MIB1, MIB2);
      MI.eraseFromParent();
      return true;
    }
  }
  [[fallthrough]];
  case AArch64::MOVaddr:
  case AArch64::MOVaddrJT:
  case AArch64::MOVaddrCP:
  case AArch64::MOVaddrTLS:
  case AArch64::MOVaddrEXT: {
    // Expand into ADRP + ADD.
    Register DstReg = MI.getOperand(0).getReg();
    assert(DstReg != AArch64::XZR);
    // ... (ADRP (MIB1) emission elided)

    // ... (a comment was lost here; it explains that for MO_TAGGED globals
    // a MOVK materializes the memory tag in bits 48-63 from a PC-relative
    // G3 relocation, offset by 2^32 so the value stays positive under the
    // small code model)
    // ... (MO_TAGGED condition elided)
      auto Tag = MI.getOperand(1);
      // ... (retargeting of Tag's flags to MO_PREL | MO_G3 elided)
      Tag.setOffset(0x100000000);
      // ... (MOVKXi build elided)
          .add(Tag)
      // ... (shift-48 immediate elided)
    }

    // ... (ADDXri (MIB2) emission of the low 12 bits elided)

    transferImpOps(MI, MIB1, MIB2);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::ADDlowTLS:
    // Produce a plain ADD of the TLS low offset.
    // ... (ADDXri emission elided)
    MI.eraseFromParent();
    return true;

  case AArch64::MOVbaseTLS: {
    Register DstReg = MI.getOperand(0).getReg();
    auto SysReg = AArch64SysReg::TPIDR_EL0;
    // ... (subtarget checks elided; depending on the configured exception
    // level / target, one of the following is selected instead:)
    //   SysReg = AArch64SysReg::TPIDR_EL3;
    //   SysReg = AArch64SysReg::TPIDR_EL2;
    //   SysReg = AArch64SysReg::TPIDR_EL1;
    //   SysReg = AArch64SysReg::TPIDRRO_EL0;
    // ... (MRS emission elided)
    MI.eraseFromParent();
    return true;
  }

  case AArch64::MOVi32imm:
    return expandMOVImm(MBB, MBBI, 32);
  case AArch64::MOVi64imm:
    return expandMOVImm(MBB, MBBI, 64);
  case AArch64::RET_ReallyLR: {
    // ... (a comment was lost here; RET_ReallyLR hides the LR use, and the
    // expansion emits a plain RET of LR that satisfies the verifier's
    // liveness checks)
    // ... (RET build elided)
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::CMP_SWAP_8:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTB, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_16:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTH, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_32:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
                          AArch64::SUBSWrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_64:
    return expandCMP_SWAP(MBB, MBBI,
                          AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::XZR, NextMBBI);
  case AArch64::CMP_SWAP_128:
  case AArch64::CMP_SWAP_128_RELEASE:
  case AArch64::CMP_SWAP_128_ACQUIRE:
  case AArch64::CMP_SWAP_128_MONOTONIC:
    return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);

  case AArch64::AESMCrrTied:
  case AArch64::AESIMCrrTied: {
    // ... (BuildMI head elided:)
            TII->get(Opcode == AArch64::AESMCrrTied ? AArch64::AESMCrr
                                                    : AArch64::AESIMCrr))
    // ... (operands elided)
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::IRGstack: {
    // ... (MF/AFI/TFI setup elided; BaseOffset comes from
    // AFI->getTaggedBasePointerOffset())

    // ... (a comment was lost here; it explains that the IRG source is the
    // frame register plus the resolved tagged-base-pointer offset)
    StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference(
        MF, BaseOffset, false /*isFixed*/, false /*isSVE*/, FrameReg,
        /*PreferFP=*/false,
        /*ForSimm=*/true);
    Register SrcReg = FrameReg;
    if (FrameRegOffset) {
      // Use the output register as a scratch for the offset computation.
      SrcReg = MI.getOperand(0).getReg();
      emitFrameOffset(MBB, MBBI, MI.getDebugLoc(), SrcReg, FrameReg,
                      FrameRegOffset, TII);
    }
    // ... (IRG emission elided)
    MI.eraseFromParent();
    return true;
  }
  case AArch64::TAGPstack: {
    int64_t Offset = MI.getOperand(2).getImm();
    // ... (BuildMI head elided:)
        TII->get(Offset >= 0 ? AArch64::ADDG : AArch64::SUBG))
    // ... (operands elided)
    MI.eraseFromParent();
    return true;
  }
  case AArch64::STGloop_wback:
  case AArch64::STZGloop_wback:
    return expandSetTagLoop(MBB, MBBI, NextMBBI);
  case AArch64::STGloop:
  case AArch64::STZGloop:
    report_fatal_error(
        "Non-writeback variants of STGloop / STZGloop should not "
        "survive past PrologEpilogInserter.");
  case AArch64::STR_ZZZZXI:
    return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 4);
  case AArch64::STR_ZZZXI:
    return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 3);
  case AArch64::STR_ZZXI:
    return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 2);
  case AArch64::STR_PPXI:
    return expandSVESpillFill(MBB, MBBI, AArch64::STR_PXI, 2);
  case AArch64::LDR_ZZZZXI:
    return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 4);
  case AArch64::LDR_ZZZXI:
    return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 3);
  case AArch64::LDR_ZZXI:
    return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 2);
  case AArch64::LDR_PPXI:
    return expandSVESpillFill(MBB, MBBI, AArch64::LDR_PXI, 2);
  case AArch64::BLR_RVMARKER:
  case AArch64::BLRA_RVMARKER:
    return expandCALL_RVMARKER(MBB, MBBI);
  case AArch64::BLR_BTI:
    return expandCALL_BTI(MBB, MBBI);
  case AArch64::StoreSwiftAsyncContext:
    return expandStoreSwiftAsyncContext(MBB, MBBI);
  case AArch64::RestoreZAPseudo: {
    auto *NewMBB = expandRestoreZA(MBB, MBBI);
    if (NewMBB != &MBB)
      NextMBBI = MBB.end(); // The block split invalidated NextMBBI.
    return true;
  }
  case AArch64::MSRpstatePseudo: {
    auto *NewMBB = expandCondSMToggle(MBB, MBBI);
    if (NewMBB != &MBB)
      NextMBBI = MBB.end(); // The block split invalidated NextMBBI.
    return true;
  }
  case AArch64::COALESCER_BARRIER_FPR16:
  case AArch64::COALESCER_BARRIER_FPR32:
  case AArch64::COALESCER_BARRIER_FPR64:
  case AArch64::COALESCER_BARRIER_FPR128:
    MI.eraseFromParent();
    return true;
  case AArch64::LD1B_2Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LD1B_2Z_IMM, AArch64::LD1B_2Z_STRIDED_IMM);
  case AArch64::LD1H_2Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LD1H_2Z_IMM, AArch64::LD1H_2Z_STRIDED_IMM);
  case AArch64::LD1W_2Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LD1W_2Z_IMM, AArch64::LD1W_2Z_STRIDED_IMM);
  case AArch64::LD1D_2Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LD1D_2Z_IMM, AArch64::LD1D_2Z_STRIDED_IMM);
  case AArch64::LDNT1B_2Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LDNT1B_2Z_IMM, AArch64::LDNT1B_2Z_STRIDED_IMM);
  case AArch64::LDNT1H_2Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LDNT1H_2Z_IMM, AArch64::LDNT1H_2Z_STRIDED_IMM);
  case AArch64::LDNT1W_2Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LDNT1W_2Z_IMM, AArch64::LDNT1W_2Z_STRIDED_IMM);
  case AArch64::LDNT1D_2Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LDNT1D_2Z_IMM, AArch64::LDNT1D_2Z_STRIDED_IMM);
  case AArch64::LD1B_2Z_PSEUDO:
    return expandMultiVecPseudo(MBB, MBBI, AArch64::ZPR2RegClass,
                                AArch64::ZPR2StridedRegClass, AArch64::LD1B_2Z,
                                AArch64::LD1B_2Z_STRIDED);
  case AArch64::LD1H_2Z_PSEUDO:
    return expandMultiVecPseudo(MBB, MBBI, AArch64::ZPR2RegClass,
                                AArch64::ZPR2StridedRegClass, AArch64::LD1H_2Z,
                                AArch64::LD1H_2Z_STRIDED);
  case AArch64::LD1W_2Z_PSEUDO:
    return expandMultiVecPseudo(MBB, MBBI, AArch64::ZPR2RegClass,
                                AArch64::ZPR2StridedRegClass, AArch64::LD1W_2Z,
                                AArch64::LD1W_2Z_STRIDED);
  case AArch64::LD1D_2Z_PSEUDO:
    return expandMultiVecPseudo(MBB, MBBI, AArch64::ZPR2RegClass,
                                AArch64::ZPR2StridedRegClass, AArch64::LD1D_2Z,
                                AArch64::LD1D_2Z_STRIDED);
  case AArch64::LDNT1B_2Z_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LDNT1B_2Z, AArch64::LDNT1B_2Z_STRIDED);
  case AArch64::LDNT1H_2Z_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LDNT1H_2Z, AArch64::LDNT1H_2Z_STRIDED);
  case AArch64::LDNT1W_2Z_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LDNT1W_2Z, AArch64::LDNT1W_2Z_STRIDED);
  case AArch64::LDNT1D_2Z_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
        AArch64::LDNT1D_2Z, AArch64::LDNT1D_2Z_STRIDED);
  case AArch64::LD1B_4Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LD1B_4Z_IMM, AArch64::LD1B_4Z_STRIDED_IMM);
  case AArch64::LD1H_4Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LD1H_4Z_IMM, AArch64::LD1H_4Z_STRIDED_IMM);
  case AArch64::LD1W_4Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LD1W_4Z_IMM, AArch64::LD1W_4Z_STRIDED_IMM);
  case AArch64::LD1D_4Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LD1D_4Z_IMM, AArch64::LD1D_4Z_STRIDED_IMM);
  case AArch64::LDNT1B_4Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LDNT1B_4Z_IMM, AArch64::LDNT1B_4Z_STRIDED_IMM);
  case AArch64::LDNT1H_4Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LDNT1H_4Z_IMM, AArch64::LDNT1H_4Z_STRIDED_IMM);
  case AArch64::LDNT1W_4Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LDNT1W_4Z_IMM, AArch64::LDNT1W_4Z_STRIDED_IMM);
  case AArch64::LDNT1D_4Z_IMM_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LDNT1D_4Z_IMM, AArch64::LDNT1D_4Z_STRIDED_IMM);
  case AArch64::LD1B_4Z_PSEUDO:
    return expandMultiVecPseudo(MBB, MBBI, AArch64::ZPR4RegClass,
                                AArch64::ZPR4StridedRegClass, AArch64::LD1B_4Z,
                                AArch64::LD1B_4Z_STRIDED);
  case AArch64::LD1H_4Z_PSEUDO:
    return expandMultiVecPseudo(MBB, MBBI, AArch64::ZPR4RegClass,
                                AArch64::ZPR4StridedRegClass, AArch64::LD1H_4Z,
                                AArch64::LD1H_4Z_STRIDED);
  case AArch64::LD1W_4Z_PSEUDO:
    return expandMultiVecPseudo(MBB, MBBI, AArch64::ZPR4RegClass,
                                AArch64::ZPR4StridedRegClass, AArch64::LD1W_4Z,
                                AArch64::LD1W_4Z_STRIDED);
  case AArch64::LD1D_4Z_PSEUDO:
    return expandMultiVecPseudo(MBB, MBBI, AArch64::ZPR4RegClass,
                                AArch64::ZPR4StridedRegClass, AArch64::LD1D_4Z,
                                AArch64::LD1D_4Z_STRIDED);
  case AArch64::LDNT1B_4Z_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LDNT1B_4Z, AArch64::LDNT1B_4Z_STRIDED);
  case AArch64::LDNT1H_4Z_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LDNT1H_4Z, AArch64::LDNT1H_4Z_STRIDED);
  case AArch64::LDNT1W_4Z_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LDNT1W_4Z, AArch64::LDNT1W_4Z_STRIDED);
  case AArch64::LDNT1D_4Z_PSEUDO:
    return expandMultiVecPseudo(
        MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
        AArch64::LDNT1D_4Z, AArch64::LDNT1D_4Z_STRIDED);
  case AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO:
    return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 2);
  case AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO:
    return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 4);
  }
  return false;
}

/// Iterate over the instructions in basic block MBB and expand any
/// pseudo instructions. Return true if anything was modified.
bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());

  bool Modified = false;
  for (auto &MBB : MF)
    Modified |= expandMBB(MBB);
  return Modified;
}

/// Returns an instance of the pseudo instruction expansion pass.
FunctionPass *llvm::createAArch64ExpandPseudoPass() {
  return new AArch64ExpandPseudo();
}
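
// Usage note (a sketch, not from the source): the pass is registered via
// INITIALIZE_PASS under the name "aarch64-expand-pseudo", so it can be
// exercised in isolation on MIR with, e.g.:
//   llc -mtriple=aarch64 -run-pass=aarch64-expand-pseudo input.mir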