LLVM: lib/Target/X86/X86FrameLowering.cpp Source File
1//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the X86 implementation of TargetFrameLowering class.
10//
11//===----------------------------------------------------------------------===//
12
37#include
38
39#define DEBUG_TYPE "x86-fl"
40
41STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
43 "Number of extra stack probes generated in prologue");
44STATISTIC(NumFunctionUsingPush2Pop2, "Number of functions using push2/pop2");
45
46using namespace llvm;
47
52 STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
53
57
60}
61
66}
67
68
69
70
71
76 (hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
78}
79
80
81
82
83
84
85
86
91}
92
93
94
95
106}
107
108static unsigned getSUBriOpcode(bool IsLP64) {
109 return IsLP64 ? X86::SUB64ri32 : X86::SUB32ri;
110}
111
112static unsigned getADDriOpcode(bool IsLP64) {
113 return IsLP64 ? X86::ADD64ri32 : X86::ADD32ri;
114}
115
116static unsigned getSUBrrOpcode(bool IsLP64) {
117 return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
118}
119
120static unsigned getADDrrOpcode(bool IsLP64) {
121 return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
122}
123
124static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
125 return IsLP64 ? X86::AND64ri32 : X86::AND32ri;
126}
127
128static unsigned getLEArOpcode(bool IsLP64) {
129 return IsLP64 ? X86::LEA64r : X86::LEA32r;
130}
131
132static unsigned getMOVriOpcode(bool Use64BitReg, int64_t Imm) {
133 if (Use64BitReg) {
134 if (isUInt<32>(Imm))
135 return X86::MOV32ri64;
136 if (isInt<32>(Imm))
137 return X86::MOV64ri32;
138 return X86::MOV64ri;
139 }
140 return X86::MOV32ri;
141}
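// Illustrative dispatch of the immediate-range checks above (example values,
// not from this file); isUInt<32> is tested first so that a zero-extendable
// constant gets the shorter MOV32ri64 encoding:
//   getMOVriOpcode(/*Use64BitReg=*/true, 0xFFFFFFFF)         -> X86::MOV32ri64
//   getMOVriOpcode(/*Use64BitReg=*/true, -1)                 -> X86::MOV64ri32
//   getMOVriOpcode(/*Use64BitReg=*/true, 0x123456789ABCDEF0) -> X86::MOV64ri
//   getMOVriOpcode(/*Use64BitReg=*/false, 42)                -> X86::MOV32ri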
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160static unsigned getPUSHOpcode(const X86Subtarget &ST) {
161 return ST.is64Bit() ? (ST.hasPPX() ? X86::PUSHP64r : X86::PUSH64r)
162 : X86::PUSH32r;
163}
164static unsigned getPOPOpcode(const X86Subtarget &ST) {
165 return ST.is64Bit() ? (ST.hasPPX() ? X86::POPP64r : X86::POP64r)
166 : X86::POP32r;
167}
168static unsigned getPUSH2Opcode(const X86Subtarget &ST) {
169 return ST.hasPPX() ? X86::PUSH2P : X86::PUSH2;
170}
171static unsigned getPOP2Opcode(const X86Subtarget &ST) {
172 return ST.hasPPX() ? X86::POP2P : X86::POP2;
173}
174
175static bool isEAXLiveIn(MachineBasicBlock &MBB) {
176 for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
177 unsigned Reg = RegMask.PhysReg;
178
179 if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
180 Reg == X86::AH || Reg == X86::AL)
181 return true;
182 }
183
184 return false;
185}
186
187
188
189
190
191static bool
192flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
193 for (const MachineInstr &MI : MBB.terminators()) {
194 bool BreakNext = false;
195 for (const MachineOperand &MO : MI.operands()) {
196 if (!MO.isReg())
197 continue;
198 Register Reg = MO.getReg();
199 if (Reg != X86::EFLAGS)
200 continue;
201
202
203
204
205 if (!MO.isDef())
206 return true;
207
208
209
210 BreakNext = true;
211 }
212
213 if (BreakNext)
214 return false;
215 }
216
217
218
219 for (const MachineBasicBlock *Succ : MBB.successors())
220 if (Succ->isLiveIn(X86::EFLAGS))
221 return true;
222
223 return false;
224}
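// Illustrative case (hypothetical block, not from this file): with the
// terminator sequence "JCC_1 %bb.1, 4; JMP_1 %bb.2", the JCC_1 uses EFLAGS
// before any terminator defines it, so this returns true and the caller must
// adjust the stack with LEA instead of an EFLAGS-clobbering ADD/SUB.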
225
226
227
230 const DebugLoc &DL, int64_t NumBytes,
231 bool InEpilogue) const {
232 bool isSub = NumBytes < 0;
236
237 uint64_t Chunk = (1LL << 31) - 1;
238
243
244
245
246 if (EmitInlineStackProbe && !InEpilogue) {
247
248
249
251 return;
252 } else if (Offset > Chunk) {
253
254
255 unsigned Reg = 0;
256 unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
257
259 Reg = Rax;
260 else
262
263 unsigned AddSubRROpc =
265 if (Reg) {
272 MI->getOperand(3).setIsDead();
273 return;
274 } else if (Offset > 8 * Chunk) {
275
276
277
278
279
280
281
282 assert(Is64Bit && "can't have 32-bit 16GB stack frame");
286
287
288 if (isSub)
290 else
298 MI->getOperand(3).setIsDead();
299
303
306 return;
307 }
308 }
309
313
314
315 unsigned Reg = isSub ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
317 if (Reg) {
318 unsigned Opc = isSub ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
319 : (Is64Bit ? X86::POP64r : X86::POP32r);
324 continue;
325 }
326 }
327
328 BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
330
332 }
333}
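// Worked example of the chunking above (illustrative, 64-bit): Chunk is
// 2^31 - 1 (~2 GiB). For a 3 GiB adjustment, Offset > Chunk, so if RAX (or a
// dead caller-saved register) is available, the offset is materialized with
// getMOVriOpcode and applied by a single SUB64rr; with no scratch register,
// the loop falls back to Chunk-sized SUB64ri32 updates. Beyond 8 * Chunk
// (~16 GiB), RAX is spilled and restored around the update instead.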
334
338 assert(Offset != 0 && "zero offset stack adjustment requested");
339
340
341
342 bool UseLEA;
343 if (!InEpilogue) {
344
345
346
347
348 UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
349 } else {
350
351
352
353
354
356 if (UseLEA && !STI.useLeaForSP())
358
359
361 "We shouldn't have allowed this insertion point");
362 }
363
365 if (UseLEA) {
370 } else {
371 bool IsSub = Offset < 0;
378 MI->getOperand(3).setIsDead();
379 }
380 return MI;
381}
382
385 bool doMergeWithPrevious) const {
386 if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
387 (!doMergeWithPrevious && MBBI == MBB.end()))
388 return 0;
389
391
393
394
395
396
397
398
399
400
401
402
403
404 if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
405 PI = std::prev(PI);
406
407 unsigned Opc = PI->getOpcode();
409
410 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD32ri) &&
411 PI->getOperand(0).getReg() == StackPtr) {
413 Offset = PI->getOperand(2).getImm();
414 } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
415 PI->getOperand(0).getReg() == StackPtr &&
416 PI->getOperand(1).getReg() == StackPtr &&
417 PI->getOperand(2).getImm() == 1 &&
418 PI->getOperand(3).getReg() == X86::NoRegister &&
419 PI->getOperand(5).getReg() == X86::NoRegister) {
420
421 Offset = PI->getOperand(4).getImm();
422 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB32ri) &&
423 PI->getOperand(0).getReg() == StackPtr) {
425 Offset = -PI->getOperand(2).getImm();
426 } else
427 return 0;
428
430 if (PI != MBB.end() && PI->isCFIInstruction()) {
436 }
437 if (!doMergeWithPrevious)
439
441}
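// Illustrative merge (hypothetical input, not from this file): if the
// instruction adjacent to the insertion point is "ADD64ri32 $rsp, $rsp, 16",
// it is erased and +16 is returned for the caller to fold into the
// adjustment it is about to emit; "LEA64r $rsp, $rsp, 1, $noreg, 16, $noreg"
// folds the same way through its displacement (operand 4).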
442
449 unsigned CFIIndex = MF.addFrameInst(CFIInst);
450
453
457}
458
459
460
464 if (!hasFP(MF)) {
466 return;
467 }
470 const Register MachineFramePtr =
473 unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
474
479}
480
483 const DebugLoc &DL, bool IsPrologue) const {
488
489
491
492
496 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
497
498 if (IsPrologue) {
500
501
502
503
504
507 CfaExpr.push_back(dwarf::DW_CFA_expression);
512 const Register MachineFramePtr =
516 unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);
517 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfFramePtr));
522 } else {
525 }
526 } else {
529 }
530 }
532 int FI = MI->getOperand(1).getIndex();
536 const Register MachineFramePtr =
540 unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);
541 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfFramePtr));
544 CfaExpr.push_back(dwarf::DW_OP_deref);
545
547 DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
549 DefCfaExpr.append(CfaExpr.str());
550
554 }
555}
556
557void X86FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
560
561
563
564
568
569
570
573 if (!X86::RFP80RegClass.contains(Reg))
574 continue;
575
576 unsigned NumFPRegs = ST.is64Bit() ? 8 : 7;
577 for (unsigned i = 0; i != NumFPRegs; ++i)
579
580 for (unsigned i = 0; i != NumFPRegs; ++i)
582 break;
583 }
584
585
588 if (TRI->isGeneralPurposeRegister(MF, Reg)) {
590 RegsToZero.reset(Reg);
591 }
592
593
594 for (MCRegister Reg : GPRsToZero.set_bits())
596
597
600}
601
605 std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
608 if (InProlog) {
610 .addImm(0 /* no explicit stack size */);
611 } else {
612 emitStackProbeInline(MF, MBB, MBBI, DL, false);
613 }
614 } else {
615 emitStackProbeCall(MF, MBB, MBBI, DL, InProlog, InstrNum);
616 }
617}
618
621}
622
626 return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
627 });
628 if (Where != PrologMBB.end()) {
630 emitStackProbeInline(MF, PrologMBB, Where, DL, true);
631 Where->eraseFromParent();
632 }
633}
634
635void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
639 bool InProlog) const {
642 emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
643 else
644 emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
645}
646
647void X86FrameLowering::emitStackProbeInlineGeneric(
652
656 "different expansion expected for CoreCLR 64 bit");
657
658 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
659 uint64_t ProbeChunk = StackProbeSize * 8;
660
662 TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;
663
664
665
666
667 if (Offset > ProbeChunk) {
668 emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,
669 MaxAlign % StackProbeSize);
670 } else {
671 emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,
672 MaxAlign % StackProbeSize);
673 }
674}
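// Example threshold (assuming the default 4096-byte stack probe interval):
// ProbeChunk = 8 * 4096 = 32768, so a 20000-byte frame takes the
// straight-line block expansion while a 100000-byte frame takes the loop
// expansion to keep code size bounded.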
675
676void X86FrameLowering::emitStackProbeInlineGenericBlock(
679 uint64_t AlignOffset) const {
680
681 const bool NeedsDwarfCFI = needsDwarfCFI(MF);
685 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
686 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
687
689
690 assert(AlignOffset < StackProbeSize);
691
692
693 if (StackProbeSize < Offset + AlignOffset) {
694
696 BuildStackAdjustment(MBB, MBBI, DL, -StackAdjustment, false)
698 if (!HasFP && NeedsDwarfCFI) {
702 }
703
709 NumFrameExtraProbe++;
710 CurrentOffset = StackProbeSize - AlignOffset;
711 }
712
713
714
715
716 while (CurrentOffset + StackProbeSize < Offset) {
717 BuildStackAdjustment(MBB, MBBI, DL, -StackProbeSize, false)
719
720 if (!HasFP && NeedsDwarfCFI) {
724 }
730 NumFrameExtraProbe++;
731 CurrentOffset += StackProbeSize;
732 }
733
734
737
738
739 unsigned Reg = Is64Bit ? X86::RAX : X86::EAX;
740 unsigned Opc = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
744 } else {
745 BuildStackAdjustment(MBB, MBBI, DL, -ChunkSize, false)
747 }
748
749
750}
751
752void X86FrameLowering::emitStackProbeInlineGenericLoop(
755 uint64_t AlignOffset) const {
757
760 "Inline stack probe loop will clobber live EFLAGS.");
761
762 const bool NeedsDwarfCFI = needsDwarfCFI(MF);
766 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
767 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
768
769 if (AlignOffset) {
770 if (AlignOffset < StackProbeSize) {
771
772 BuildStackAdjustment(MBB, MBBI, DL, -AlignOffset, false)
774
780 NumFrameExtraProbe++;
781 Offset -= AlignOffset;
782 }
783 }
784
785
786 NumFrameLoopProbe++;
788
791
793 MF.insert(MBBIter, testMBB);
794 MF.insert(MBBIter, tailMBB);
795
798 : X86::EAX;
799
803
804
805 {
806 const unsigned BoundOffset = alignDown(Offset, StackProbeSize);
809 .addReg(FinalStackProbed)
812
813
814
815 if (!HasFP && NeedsDwarfCFI) {
816
817
818 const Register DwarfFinalStackProbed =
821 : FinalStackProbed;
822
825 nullptr, TRI->getDwarfRegNum(DwarfFinalStackProbed, true)));
828 }
829 }
830
831
832 BuildStackAdjustment(*testMBB, testMBB->end(), DL, -StackProbeSize,
833 false)
835
836
842
843
846 .addReg(FinalStackProbed)
848
849
856
857
861
862
865 if (TailOffset) {
866 BuildStackAdjustment(*tailMBB, TailMBBIter, DL, -TailOffset,
867 false)
869 }
870
871
872 if (!HasFP && NeedsDwarfCFI) {
873
874
875 const Register DwarfStackPtr =
879
882 nullptr, TRI->getDwarfRegNum(DwarfStackPtr, true)));
883 }
884
885
887}
888
889void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
893 assert(STI.is64Bit() && "different expansion needed for 32 bit");
897
900 "Inline stack probe loop will clobber live EFLAGS.");
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
933
935 MF.insert(MBBIter, RoundMBB);
936 MF.insert(MBBIter, LoopMBB);
937 MF.insert(MBBIter, ContinueMBB);
938
939
943
944
945 const int64_t ThreadEnvironmentStackLimit = 0x10;
946 const int64_t PageSize = 0x1000;
947 const int64_t PageMask = ~(PageSize - 1);
948
949
950
954 SizeReg = InProlog ? X86::RAX : MRI.createVirtualRegister(RegClass),
955 ZeroReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
956 CopyReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
957 TestReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
958 FinalReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
959 RoundedReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
960 LimitReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
961 JoinReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
962 ProbeReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass);
963
964
965 int64_t RCXShadowSlot = 0;
966 int64_t RDXShadowSlot = 0;
967
968
969 if (InProlog) {
970
971
972
976
977
978
979
980 const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
981 const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
982 int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
983
984
985 if (IsRCXLiveIn)
986 RCXShadowSlot = InitSlot;
987 if (IsRDXLiveIn)
988 RDXShadowSlot = InitSlot;
989 if (IsRDXLiveIn && IsRCXLiveIn)
990 RDXShadowSlot += 8;
991
992 if (IsRCXLiveIn)
994 RCXShadowSlot)
996 if (IsRDXLiveIn)
998 RDXShadowSlot)
1000 } else {
1001
1003 }
1004
1005
1006
1018
1019
1020
1021
1022
1023
1024
1025
1030 .addImm(ThreadEnvironmentStackLimit)
1033
1035 .addMBB(ContinueMBB)
1037
1038
1039 if (InProlog)
1041 BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
1045
1046
1047
1048
1049 if (!InProlog) {
1050 BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
1055 }
1056
1057 if (InProlog)
1061
1062
1070
1071 if (InProlog)
1079
1081
1082
1083 if (InProlog) {
1084 if (RCXShadowSlot)
1086 TII.get(X86::MOV64rm), X86::RCX),
1087 X86::RSP, false, RCXShadowSlot);
1088 if (RDXShadowSlot)
1090 TII.get(X86::MOV64rm), X86::RDX),
1091 X86::RSP, false, RDXShadowSlot);
1092 }
1093
1094
1095
1096 BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
1099
1100
1106
1107 if (InProlog) {
1110 }
1111
1112
1113 if (InProlog) {
1114 for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
1116 }
1119 }
1122 }
1126 }
1127 }
1128}
1129
1130void X86FrameLowering::emitStackProbeCall(
1133 std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
1135
1136
1138 report_fatal_error("Emitting stack probe calls on 64-bit with the large "
1139 "code model and indirect thunks not yet implemented.");
1140
1143 "Stack probe calls will clobber live EFLAGS.");
1144
1145 unsigned CallOp;
1147 CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
1148 else
1149 CallOp = X86::CALLpcrel32;
1150
1152
1155
1156
1157
1159
1160
1164 } else {
1167 }
1168
1176
1179
1180
1181
1182
1183
1184
1185 ModInst =
1189 }
1190
1191
1192
1193
1194 if (InstrNum) {
1196
1199 } else {
1200
1201
1202 unsigned SPDefOperand = ModInst->getNumOperands() - 2;
1205 }
1206 }
1207
1208 if (InProlog) {
1209
1210 for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
1212 }
1213}
1214
1216
1217
1218 const uint64_t Win64MaxSEHOffset = 128;
1219 uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
1220
1221 return SEHFrameOffset & -16;
1222}
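// Worked examples (illustrative): SPAdjust = 40 gives min(40, 128) & -16 = 32;
// SPAdjust = 1000 gives min(1000, 128) & -16 = 128. Clamping to 128 keeps the
// .seh_setframe offset within the Win64 unwind-info range, and masking with
// -16 keeps the frame-pointer offset 16-byte aligned.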
1223
1224
1225
1226
1227
1229X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
1231 Align MaxAlign = MFI.getMaxAlign();
1234 if (HasRealign) {
1236 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
1237 else if (MaxAlign < SlotSize)
1239 }
1240
1242 if (HasRealign)
1243 MaxAlign = (MaxAlign > 16) ? MaxAlign : Align(16);
1244 else
1245 MaxAlign = Align(16);
1246 }
1247 return MaxAlign.value();
1248}
1249
1256
1260 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
1261 const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);
1262
1263
1264
1265
1266 if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
1267 {
1268 NumFrameLoopProbe++;
1277
1279 MF.insert(MBBIter, entryMBB);
1280 MF.insert(MBBIter, headMBB);
1281 MF.insert(MBBIter, bodyMBB);
1282 MF.insert(MBBIter, footMBB);
1283 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
1286 : X86::EAX;
1287
1288
1289 {
1290
1292 BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
1296 BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
1297 .addReg(FinalStackProbed)
1300
1301
1302 MI->getOperand(3).setIsDead();
1303
1306 .addReg(FinalStackProbed)
1315 }
1316
1317
1318
1319 {
1323 .addImm(StackProbeSize)
1325
1329 .addReg(FinalStackProbed)
1331
1332
1337
1340 }
1341
1342
1343 {
1349
1353 .addImm(StackProbeSize)
1355
1356
1359 .addReg(FinalStackProbed)
1362
1363
1370 }
1371
1372
1373 {
1375 .addReg(FinalStackProbed)
1383 }
1384
1386 }
1387 } else {
1392
1393
1394 MI->getOperand(3).setIsDead();
1395 }
1396}
1397
1399
1400
1402 "MF used frame lowering for wrong subtarget");
1406}
1407
1408
1409
1410
1411bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {
1413}
1414
1415bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {
1417}
1418
1419
1421 switch (Opcode) {
1422 case X86::REPNE_PREFIX:
1423 case X86::REP_MOVSB_32:
1424 case X86::REP_MOVSB_64:
1425 case X86::REP_MOVSD_32:
1426 case X86::REP_MOVSD_64:
1427 case X86::REP_MOVSQ_32:
1428 case X86::REP_MOVSQ_64:
1429 case X86::REP_MOVSW_32:
1430 case X86::REP_MOVSW_64:
1431 case X86::REP_PREFIX:
1432 case X86::REP_STOSB_32:
1433 case X86::REP_STOSB_64:
1434 case X86::REP_STOSD_32:
1435 case X86::REP_STOSD_64:
1436 case X86::REP_STOSQ_32:
1437 case X86::REP_STOSQ_64:
1438 case X86::REP_STOSW_32:
1439 case X86::REP_STOSW_64:
1440 return true;
1441 default:
1442 break;
1443 }
1444 return false;
1445}
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1535 "MF used frame lowering for wrong subtarget");
1540 uint64_t MaxAlign = calculateMaxStackAlign(MF);
1546 bool FnHasClrFunclet =
1548 bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
1549 bool HasFP = hasFP(MF);
1550 bool IsWin64Prologue = isWin64Prologue(MF);
1552
1555 bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
1556 bool NeedsDwarfCFI = needsDwarfCFI(MF);
1558 const Register MachineFramePtr =
1562 bool HasWinCFI = false;
1563
1564
1565
1568
1569
1571
1572
1573 ArgBaseReg = MI->getOperand(0).getReg();
1574
1575
1576
1577
1579 ArgBaseReg)
1582 .addUse(X86::NoRegister)
1584 .addUse(X86::NoRegister)
1586 if (NeedsDwarfCFI) {
1587
1588 unsigned DwarfStackPtr = TRI->getDwarfRegNum(ArgBaseReg, true);
1592 }
1598 .addReg(X86::NoRegister)
1600 .addReg(X86::NoRegister)
1602 }
1603
1604
1605
1607 if (TailCallArgReserveSize && IsWin64Prologue)
1608 report_fatal_error("Can't handle guaranteed tail call under win64 yet");
1609
1610 const bool EmitStackProbeCall =
1613
1618
1619
1621 .addUse(MachineFramePtr)
1624 .addUse(X86::NoRegister)
1627 .addUse(X86::NoRegister);
1628 break;
1629 }
1630 [[fallthrough]];
1631
1634 !IsWin64Prologue &&
1635 "win64 prologue does not set the bit 60 in the saved frame pointer");
1637 .addUse(MachineFramePtr)
1640 break;
1641
1643 break;
1644 }
1645 }
1646
1647
1648
1649
1652 StackSize += 8;
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1667 }
1668
1669
1670
1671
1672
1673
1677 !EmitStackProbeCall &&
1682 if (HasFP)
1684 X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
1685 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
1687 }
1688
1689
1690
1691
1692 if (TailCallArgReserveSize != 0) {
1693 BuildStackAdjustment(MBB, MBBI, DL, -(int)TailCallArgReserveSize,
1694 false)
1696 }
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1713 int stackGrowth = -SlotSize;
1714
1715
1716 Register Establisher = X86::NoRegister;
1717 if (IsClrFunclet)
1719 else if (IsFunclet)
1721
1722 if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
1723
1724
1725
1726 unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1728 .addReg(Establisher)
1731 }
1732
1733 if (HasFP) {
1735
1736
1738 NumBytes =
1740
1741
1742 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
1743 NumBytes = alignTo(NumBytes, MaxAlign);
1744
1745
1750
1751 if (NeedsDwarfCFI && !ArgBaseReg.isValid()) {
1752
1753
1757 nullptr, -2 * stackGrowth + (int)TailCallArgReserveSize),
1759
1760
1761 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1764 2 * stackGrowth -
1765 (int)TailCallArgReserveSize),
1767 }
1768
1769 if (NeedsWinCFI) {
1770 HasWinCFI = true;
1774 }
1775
1776 if (!IsFunclet) {
1778 assert(!IsWin64Prologue &&
1779 "win64 prologue does not store async context right below rbp");
1781
1782
1783
1784
1785 if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
1786
1787
1792 } else {
1793
1794
1798 }
1799
1800 if (NeedsWinCFI) {
1801 HasWinCFI = true;
1805 }
1806
1810 .addUse(X86::NoRegister)
1812 .addUse(X86::NoRegister)
1818 }
1819
1820 if (!IsWin64Prologue && !IsFunclet) {
1821
1828
1829 if (NeedsDwarfCFI) {
1830 if (ArgBaseReg.isValid()) {
1832 CfaExpr.push_back(dwarf::DW_CFA_expression);
1834 unsigned DwarfReg = TRI->getDwarfRegNum(MachineFramePtr, true);
1837 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
1839
1843 } else {
1844
1845
1846 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1851 }
1852 }
1853
1854 if (NeedsWinFPO) {
1855
1856 HasWinCFI = true;
1861 }
1862 }
1863 }
1864 } else {
1865 assert(!IsFunclet && "funclets without FPs not yet implemented");
1866 NumBytes =
1868 }
1869
1870
1871
1872 if (!IsFunclet) {
1873 if (HasFP && TRI->hasStackRealignment(MF))
1875 else
1877 }
1878
1879
1880
1881 unsigned ParentFrameNumBytes = NumBytes;
1882 if (IsFunclet)
1883 NumBytes = getWinEHFuncletFrameSize(MF);
1884
1885
1886 bool PushedRegs = false;
1891 return false;
1892 unsigned Opc = MBBI->getOpcode();
1893 return Opc == X86::PUSH32r || Opc == X86::PUSH64r || Opc == X86::PUSHP64r ||
1894 Opc == X86::PUSH2 || Opc == X86::PUSH2P;
1895 };
1896
1897 while (IsCSPush(MBBI)) {
1898 PushedRegs = true;
1899 Register Reg = MBBI->getOperand(0).getReg();
1900 LastCSPush = MBBI;
1902 unsigned Opc = LastCSPush->getOpcode();
1903
1904 if (!HasFP && NeedsDwarfCFI) {
1905
1906
1908
1909
1910 if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)
1916 }
1917
1918 if (NeedsWinCFI) {
1919 HasWinCFI = true;
1923 if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)
1925 .addImm(LastCSPush->getOperand(1).getReg())
1927 }
1928 }
1929
1930
1931
1932
1933 if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF) &&
1934 !ArgBaseReg.isValid()) {
1935 assert(HasFP && "There should be a frame pointer if stack is realigned.");
1937
1938 if (NeedsWinCFI) {
1939 HasWinCFI = true;
1943 }
1944 }
1945
1946
1947
1948
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961 uint64_t AlignedNumBytes = NumBytes;
1962 if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))
1963 AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
1964 if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
1966 "The Red Zone is not accounted for in stack probes");
1967
1968
1970
1971 if (isEAXAlive) {
1973
1977 } else {
1978
1982 }
1983 }
1984
1986
1987
1988 int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
1992 } else {
1993
1994
1996 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
1998 }
1999
2000
2002
2003 if (isEAXAlive) {
2004
2008 StackPtr, false, NumBytes - 8);
2009 else
2011 StackPtr, false, NumBytes - 4);
2014 }
2015 } else if (NumBytes) {
2017 }
2018
2019 if (NeedsWinCFI && NumBytes) {
2020 HasWinCFI = true;
2024 }
2025
2026 int SEHFrameOffset = 0;
2027 unsigned SPOrEstablisher;
2028 if (IsFunclet) {
2029 if (IsClrFunclet) {
2030
2031
2032
2033
2034 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
2038 Establisher, false, PSPSlotOffset)
2041 ;
2042
2043
2045 false, PSPSlotOffset)
2046 .addReg(Establisher)
2048 NoInfo,
2051 }
2052 SPOrEstablisher = Establisher;
2053 } else {
2055 }
2056
2057 if (IsWin64Prologue && HasFP) {
2058
2059
2060
2062 if (SEHFrameOffset)
2064 SPOrEstablisher, false, SEHFrameOffset);
2065 else
2067 .addReg(SPOrEstablisher);
2068
2069
2070 if (NeedsWinCFI && !IsFunclet) {
2071 assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
2072 HasWinCFI = true;
2075 .addImm(SEHFrameOffset)
2079 }
2080 } else if (IsFunclet && STI.is32Bit()) {
2081
2083
2084
2090
2092 false, EHRegOffset)
2094 }
2095 }
2096
2100
2101 if (NeedsWinCFI) {
2102 int FI;
2104 if (X86::FR64RegClass.contains(Reg)) {
2107 if (IsWin64Prologue && IsFunclet)
2109 else
2112 SEHFrameOffset;
2113
2114 HasWinCFI = true;
2115 assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
2120 }
2121 }
2122 }
2123 }
2124
2125 if (NeedsWinCFI && HasWinCFI)
2128
2129 if (FnHasClrFunclet && !IsFunclet) {
2130
2131
2132
2133 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
2137 PSPSlotOffset)
2142 }
2143
2144
2145
2146
2147 if (IsWin64Prologue && TRI->hasStackRealignment(MF)) {
2148 assert(HasFP && "There should be a frame pointer if stack is realigned.");
2149 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
2150 }
2151
2152
2153 if (IsFunclet && STI.is32Bit())
2154 return;
2155
2156
2157
2158
2159
2161
2164 .addReg(SPOrEstablisher)
2167
2168
2172 .addReg(SPOrEstablisher)
2174 }
2175
2177
2178
2179
2180
2186 assert(UsedReg == BasePtr);
2190 }
2191 }
2192 if (ArgBaseReg.isValid()) {
2193
2195 int FI = MI->getOperand(1).getIndex();
2196 unsigned MOVmr = Is64Bit ? X86::MOV64mr : X86::MOV32mr;
2197
2201 }
2202
2203 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
2204
2205 if (!HasFP && NumBytes) {
2206
2212 }
2213
2214
2216 }
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2231 bool NeedsCLD = false;
2232
2235 if (MI.isCall()) {
2236 NeedsCLD = true;
2237 break;
2238 }
2239
2241 NeedsCLD = true;
2242 break;
2243 }
2244
2245 if (MI.isInlineAsm()) {
2246
2247
2248
2249 NeedsCLD = true;
2250 break;
2251 }
2252 }
2253 }
2254
2255 if (NeedsCLD) {
2258 }
2259 }
2260
2261
2263}
2264
2267
2268
2269
2270
2271
2272
2274}
2275
2277 switch (MI.getOpcode()) {
2278 case X86::CATCHRET:
2279 case X86::CLEANUPRET:
2280 return true;
2281 default:
2282 return false;
2283 }
2285}
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300unsigned
2301X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
2305 true)
2308 return static_cast<unsigned>(Offset);
2309}
2310
2311unsigned
2312X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
2314
2316
2318 unsigned XMMSize =
2319 WinEHXMMSlotInfo.size() * TRI->getSpillSize(X86::VR128RegClass);
2320
2321 unsigned UsedSize;
2325
2326
2327
2328 UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
2329 } else {
2330
2332 }
2333
2334
2335
2337
2338
2339 return FrameSizeMinusRBP + XMMSize - CSSize;
2340}
2341
2343 return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
2344 Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||
2345 Opc == X86::TCRETURNdi64 || Opc == X86::TCRETURNmi64;
2346}
2347
2356 DL = MBBI->getDebugLoc();
2357
2362
2364 bool NeedsWin64CFI =
2367
2368
2370 uint64_t MaxAlign = calculateMaxStackAlign(MF);
2373 bool HasFP = hasFP(MF);
2375
2379
2382 unsigned Opc = X86::LEA32r;
2383 Register StackReg = X86::ESP;
2384 ArgBaseReg = MI->getOperand(0).getReg();
2385 if (STI.is64Bit()) {
2386 Opc = X86::LEA64r;
2387 StackReg = X86::RSP;
2388 }
2389
2390
2394 .addUse(X86::NoRegister)
2396 .addUse(X86::NoRegister)
2398 if (NeedsDwarfCFI) {
2399 unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackReg, true);
2404 }
2406 }
2407
2408 if (IsFunclet) {
2409 assert(HasFP && "EH funclets without FP not yet implemented");
2410 NumBytes = getWinEHFuncletFrameSize(MF);
2411 } else if (HasFP) {
2412
2414 NumBytes = FrameSize - CSSize - TailCallArgReserveSize;
2415
2416
2417
2418 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
2419 NumBytes = alignTo(FrameSize, MaxAlign);
2420 } else {
2421 NumBytes = StackSize - CSSize - TailCallArgReserveSize;
2422 }
2423 uint64_t SEHStackAllocAmt = NumBytes;
2424
2425
2427 if (HasFP) {
2429
2432 }
2433
2436 MachineFramePtr)
2438
2439
2440
2443 .addUse(MachineFramePtr)
2446 }
2447
2448 if (NeedsDwarfCFI) {
2449 if (!ArgBaseReg.isValid()) {
2450 unsigned DwarfStackPtr =
2451 TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
2455 }
2457 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
2462 --AfterPop;
2463 }
2465 }
2466 }
2467
2469
2472 unsigned Opc = PI->getOpcode();
2473
2474 if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
2476 (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::BTR64ri8 &&
2477 Opc != X86::ADD64ri32 && Opc != X86::POPP64r && Opc != X86::POP2 &&
2478 Opc != X86::POP2P && Opc != X86::LEA64r))
2479 break;
2480 FirstCSPop = PI;
2481 }
2482
2484 }
2485 if (ArgBaseReg.isValid()) {
2486
2488 int FI = MI->getOperand(1).getIndex();
2489 unsigned MOVrm = Is64Bit ? X86::MOV64rm : X86::MOV32rm;
2490
2493 }
2494 MBBI = FirstCSPop;
2495
2496 if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
2497 emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
2498
2500 DL = MBBI->getDebugLoc();
2501
2502
2505
2506
2507
2508
2509
2511 !IsFunclet) {
2512 if (TRI->hasStackRealignment(MF))
2513 MBBI = FirstCSPop;
2516 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
2517
2519 LEAAmount -= 16;
2520
2521
2522
2523
2524
2525
2526
2527
2528 if (LEAAmount != 0) {
2531 false, LEAAmount);
2533 } else {
2534 unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
2537 }
2538 } else if (NumBytes) {
2539
2541 if (!HasFP && NeedsDwarfCFI) {
2542
2545 nullptr, CSSize + TailCallArgReserveSize + SlotSize),
2547 }
2549 }
2550
2551
2552
2553
2554
2555
2556
2557 if (NeedsWin64CFI && MF.hasWinCFI())
2559
2560 if (!HasFP && NeedsDwarfCFI) {
2561 MBBI = FirstCSPop;
2563
2564
2567 unsigned Opc = PI->getOpcode();
2569 if (Opc == X86::POP32r || Opc == X86::POP64r || Opc == X86::POPP64r ||
2570 Opc == X86::POP2 || Opc == X86::POP2P) {
2572
2573
2574 if (Opc == X86::POP2 || Opc == X86::POP2P)
2579 }
2580 }
2581 }
2582
2583
2584
2585
2588
2590
2592 assert(Offset >= 0 && "TCDelta should never be positive");
2594
2597 }
2598 }
2599
2600
2602 BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));
2603}
2604
2606 int FI,
2609
2611
2612
2613
2616 else if (TRI->hasStackRealignment(MF))
2618 else
2620
2621
2622
2623
2624
2630 int64_t FPDelta = 0;
2631
2632
2633
2634
2635
2639 }
2640
2641 if (IsWin64Prologue) {
2643
2644
2646
2647
2650 uint64_t NumBytes = FrameSize - CSSize;
2651
2653 if (FI && FI == X86FI->getFAIndex())
2655
2656
2657
2658
2659
2660 FPDelta = FrameSize - SEHFrameOffset;
2662 "FPDelta isn't aligned per the Win64 ABI!");
2663 }
2664
2666
2668
2669
2671
2672
2674 if (TailCallReturnAddrDelta < 0)
2675 Offset -= TailCallReturnAddrDelta;
2676
2678 }
2679
2680
2681
2682
2686}
2687
2693 const auto it = WinEHXMMSlotInfo.find(FI);
2694
2695 if (it == WinEHXMMSlotInfo.end())
2697
2700 it->second;
2701}
2702
2706 int Adjustment) const {
2711}
2712
2716 bool IgnoreSPUpdates) const {
2717
2719
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2757
2758
2759
2760
2763
2764
2766 "we don't handle this case!");
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2794}
2795
2798 std::vector<CalleeSavedInfo> &CSI) const {
2801
2802 unsigned CalleeSavedFrameSize = 0;
2803 unsigned XMMCalleeSavedFrameSize = 0;
2806
2808
2809 if (TailCallReturnAddrDelta < 0) {
2810
2811
2812
2813
2814
2815
2816
2817
2818
2820 TailCallReturnAddrDelta - SlotSize, true);
2821 }
2822
2823
2824 if (this->TRI->hasBasePointer(MF)) {
2825
2830 }
2831 }
2832
2833 if (hasFP(MF)) {
2834
2835 SpillSlotOffset -= SlotSize;
2837
2838
2839
2841 SpillSlotOffset -= SlotSize;
2843 SpillSlotOffset -= SlotSize;
2844 }
2845
2846
2847
2848
2850 for (unsigned i = 0; i < CSI.size(); ++i) {
2851 if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
2852 CSI.erase(CSI.begin() + i);
2853 break;
2854 }
2855 }
2856 }
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867 unsigned NumRegsForPush2 = 0;
2868 if (STI.hasPush2Pop2()) {
2870 return X86::GR64RegClass.contains(I.getReg());
2871 });
2872 bool NeedPadding = (SpillSlotOffset % 16 != 0) && (NumCSGPR % 2 == 0);
2873 bool UsePush2Pop2 = NeedPadding ? NumCSGPR > 2 : NumCSGPR > 1;
2875 NumRegsForPush2 = UsePush2Pop2 ? alignDown(NumCSGPR, 2) : 0;
2877 SpillSlotOffset -= SlotSize;
2879 }
2880 }
2881
2882
2885
2886 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2887 continue;
2888
2889
2890
2892 (SpillSlotOffset % 16 == 0 ||
2895
2896 SpillSlotOffset -= SlotSize;
2897 CalleeSavedFrameSize += SlotSize;
2898
2901 }
2902
2903
2904
2906 SpillSlotOffset -= SlotSize;
2907 CalleeSavedFrameSize += SlotSize;
2908
2910
2912 }
2914 "Expect even candidates for push2/pop2");
2916 ++NumFunctionUsingPush2Pop2;
2919
2920
2923 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2924 continue;
2925
2926
2927 MVT VT = MVT::Other;
2928 if (X86::VK16RegClass.contains(Reg))
2929 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2930
2932 unsigned Size = TRI->getSpillSize(*RC);
2933 Align Alignment = TRI->getSpillAlign(*RC);
2934
2935 assert(SpillSlotOffset < 0 && "SpillSlotOffset should always < 0 on X86");
2936 SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);
2937
2938
2939 SpillSlotOffset -= Size;
2943
2944
2945 if (X86::VR128RegClass.contains(Reg)) {
2946 WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
2947 XMMCalleeSavedFrameSize += Size;
2948 }
2949 }
2950
2951 return true;
2952}
2953
2958
2959
2960
2962 return true;
2963
2964
2969
2970
2971
2972 auto UpdateLiveInCheckCanKill = [&](Register Reg) {
2974
2975
2976
2977
2978
2979 if (MRI.isLiveIn(Reg))
2980 return false;
2982
2984 if (MRI.isLiveIn(*AReg))
2985 return false;
2986 return true;
2987 };
2988 auto UpdateLiveInGetKillRegState = [&](Register Reg) {
2990 };
2991
2992 for (auto RI = CSI.rbegin(), RE = CSI.rend(); RI != RE; ++RI) {
2993 Register Reg = RI->getReg();
2994 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2995 continue;
2996
3000 .addReg(Reg, UpdateLiveInGetKillRegState(Reg))
3001 .addReg(Reg2, UpdateLiveInGetKillRegState(Reg2))
3003 } else {
3005 .addReg(Reg, UpdateLiveInGetKillRegState(Reg))
3007 }
3008 }
3009
3011 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
3012 Register BaseReg = this->TRI->getBaseRegister();
3016 }
3017
3018
3019
3022 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
3023 continue;
3024
3025
3026 MVT VT = MVT::Other;
3027 if (X86::VK16RegClass.contains(Reg))
3028 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
3029
3030
3033
3036 --MI;
3038 ++MI;
3039 }
3040
3041 return true;
3042}
3043
3047
3050 "SEH should not use CATCHRET");
3053
3054
3055 if (STI.is64Bit()) {
3056
3061 .addMBB(CatchRetTarget)
3063 } else {
3064
3066 .addMBB(CatchRetTarget);
3067 }
3068
3069
3070
3072}
3073
3077 if (CSI.empty())
3078 return false;
3079
3081
3082
3083 if (STI.is32Bit())
3084 return true;
3085
3086
3087 if (MI->getOpcode() == X86::CATCHRET) {
3091 if (IsSEH)
3092 return true;
3093 }
3094 }
3095
3097
3098
3101 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
3102 continue;
3103
3104
3105 MVT VT = MVT::Other;
3106 if (X86::VK16RegClass.contains(Reg))
3107 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
3108
3112 }
3113
3114
3118 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
3119 Register BaseReg = this->TRI->getBaseRegister();
3122 }
3123
3124
3125 for (auto I = CSI.begin(), E = CSI.end(); I != E; ++I) {
3127 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
3128 continue;
3129
3134 else
3137 }
3140
3141 return true;
3142}
3143
3148
3149
3154 SavedRegs.set(BasePtr);
3155 }
3156}
3157
3161 I++) {
3162 if (I->hasNestAttr() && !I->use_empty())
3163 return true;
3164 }
3165 return false;
3166}
3167
3168
3169
3170
3171
3175
3176
3178 if (Is64Bit)
3179 return Primary ? X86::R14 : X86::R13;
3180 else
3181 return Primary ? X86::EBX : X86::EDI;
3182 }
3183
3184 if (Is64Bit) {
3185 if (IsLP64)
3186 return Primary ? X86::R11 : X86::R12;
3187 else
3188 return Primary ? X86::R11D : X86::R12D;
3189 }
3190
3192
3196 if (IsNested)
3197 report_fatal_error("Segmented stacks does not support fastcall with "
3198 "nested function.");
3199 return Primary ? X86::EAX : X86::ECX;
3200 }
3201 if (IsNested)
3202 return Primary ? X86::EDX : X86::EAX;
3203 return Primary ? X86::ECX : X86::EAX;
3204}
3205
3206
3207
3209
3214 unsigned TlsReg, TlsOffset;
3216
3217
3218
3219 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
3220
3223 "Scratch register is live-in");
3224
3226 report_fatal_error("Segmented stacks do not support vararg functions.");
3230 report_fatal_error("Segmented stacks not supported on this platform.");
3231
3232
3233
3234
3236
3238 return;
3239
3243 bool IsNested = false;
3244
3245
3248
3249
3250
3251
3252 for (const auto &LI : PrologueMBB.liveins()) {
3255 }
3256
3257 if (IsNested)
3259
3262
3263
3264
3266
3267
3270 TlsReg = X86::FS;
3271 TlsOffset = IsLP64 ? 0x70 : 0x40;
3273 TlsReg = X86::GS;
3274 TlsOffset = 0x60 + 90 * 8;
3276 TlsReg = X86::GS;
3277 TlsOffset = 0x28;
3279 TlsReg = X86::FS;
3280 TlsOffset = 0x18;
3282 TlsReg = X86::FS;
3283 TlsOffset = 0x20;
3284 } else {
3285 report_fatal_error("Segmented stacks not supported on this platform.");
3286 }
3287
3288 if (CompareStackPointer)
3289 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
3290 else
3291 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r),
3292 ScratchReg)
3298
3299 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm))
3306 } else {
3308 TlsReg = X86::GS;
3309 TlsOffset = 0x30;
3311 TlsReg = X86::GS;
3312 TlsOffset = 0x48 + 90 * 4;
3314 TlsReg = X86::FS;
3315 TlsOffset = 0x14;
3317 TlsReg = X86::FS;
3318 TlsOffset = 0x10;
3320 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
3321 } else {
3322 report_fatal_error("Segmented stacks not supported on this platform.");
3323 }
3324
3325 if (CompareStackPointer)
3326 ScratchReg = X86::ESP;
3327 else
3328 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg)
3334
3337 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
3345
3346
3347 unsigned ScratchReg2;
3348 bool SaveScratch2;
3349 if (CompareStackPointer) {
3350
3352 SaveScratch2 = false;
3353 } else {
3354
3356
3357
3358
3360 }
3361
3362
3364 "Scratch register is live-in and not saved");
3365
3366 if (SaveScratch2)
3367 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
3369
3370 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
3372 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
3374 .addReg(ScratchReg2)
3379
3380 if (SaveScratch2)
3381 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
3382 }
3383 }
3384
3385
3386
3388 .addMBB(&PrologueMBB)
3390
3391
3392
3394
3395
3396
3397 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
3398 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
3399 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
3400 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
3401
3402 if (IsNested)
3404
3409 Reg11)
3411 } else {
3412 BuildMI(allocMBB, DL, TII.get(X86::PUSH32i))
3415 }
3416
3417
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3435 report_fatal_error("Emitting morestack calls on 64-bit with the large "
3436 "code model and thunks not yet implemented.");
3437 BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
3443 } else {
3445 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
3447 else
3448 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
3450 }
3451
3452 if (IsNested)
3453 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
3454 else
3455 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
3456
3458
3461
3462#ifdef EXPENSIVE_CHECKS
3464#endif
3465}
3466
3467
3468
3469
3470
3473 for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
3475 if (Node->getNumOperands() != 2)
3476 continue;
3477 MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
3478 ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
3479 if (!NodeName || !NodeVal)
3480 continue;
3481 ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
3482 if (ValConst && NodeName->getString() == LiteralName) {
3484 }
3485 }
3486
3488 " required but not provided");
3489}
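// The named metadata consumed above looks roughly like this in IR (sketch;
// the literal values vary per ERTS build and are placeholders here):
//   !hipe.literals = !{!0, !1, !2}
//   !0 = !{!"P_NSP_LIMIT", i32 152}
//   !1 = !{!"X86_LEAF_WORDS", i32 24}
//   !2 = !{!"AMD64_LEAF_WORDS", i32 24}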
3490
3491
3492
3497 [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) &&
3499 return MI.isMetaInstruction();
3500 });
3501}
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3522
3523
3524
3525 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
3526
3527
3530 if (!HiPELiteralsMD)
3532 "Can't generate HiPE prologue without runtime parameters");
3534 HiPELiteralsMD, Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
3535 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
3536 const unsigned Guaranteed = HipeLeafWords * SlotSize;
3537 unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs
3539 : 0;
3541
3543 "HiPE prologue is only supported on Linux operating systems.");
3544
3545
3546
3547
3548
3549
3550
3551
3553 unsigned MoreStackForCalls = 0;
3554
3555 for (auto &MBB : MF) {
3557 if (!MI.isCall())
3558 continue;
3559
3560
3562
3563
3565 continue;
3566
3568 if (!F)
3569 continue;
3570
3571
3572
3573
3574
3575
3576 if (F->getName().contains("erlang.") || F->getName().contains("bif_") ||
3578 continue;
3579
3580 unsigned CalleeStkArity = F->arg_size() > CCRegisteredArgs
3581 ? F->arg_size() - CCRegisteredArgs
3582 : 0;
3583 if (HipeLeafWords - 1 > CalleeStkArity)
3584 MoreStackForCalls =
3585 std::max(MoreStackForCalls,
3586 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
3587 }
3588 }
3589 MaxStack += MoreStackForCalls;
3590 }
3591
3592
3593
3594 if (MaxStack > Guaranteed) {
3597
3598 for (const auto &LI : PrologueMBB.liveins()) {
3601 }
3602
3605
3606 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
3607 unsigned LEAop, CMPop, CALLop;
3608 SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
3610 SPReg = X86::RSP;
3611 PReg = X86::RBP;
3612 LEAop = X86::LEA64r;
3613 CMPop = X86::CMP64rm;
3614 CALLop = X86::CALL64pcrel32;
3615 } else {
3616 SPReg = X86::ESP;
3617 PReg = X86::EBP;
3618 LEAop = X86::LEA32r;
3619 CMPop = X86::CMP32rm;
3620 CALLop = X86::CALLpcrel32;
3621 }
3622
3625 "HiPE prologue scratch register is live-in");
3626
3627
3629 false, -MaxStack);
3630
3632 PReg, false, SPLimitOffset);
3633 BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1))
3634 .addMBB(&PrologueMBB)
3636
3637
3640 false, -MaxStack);
3642 PReg, false, SPLimitOffset);
3643 BuildMI(incStackMBB, DL, TII.get(X86::JCC_1))
3644 .addMBB(incStackMBB)
3646
3647 stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
3648 stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
3649 incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
3650 incStackMBB->addSuccessor(incStackMBB, {1, 100});
3651 }
3652#ifdef EXPENSIVE_CHECKS
3654#endif
3655}
3656
3662 return false;
3663
3665 return false;
3666
3668
3669 if (NumPops != 1 && NumPops != 2)
3670 return false;
3671
3672
3673
3675 return false;
3677 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
3678 return false;
3679
3680 unsigned Regs[2];
3681 unsigned FoundRegs = 0;
3682
3684 const MachineOperand &RegMask = Prev->getOperand(1);
3685
3686 auto &RegClass =
3687 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
3688
3689 for (auto Candidate : RegClass) {
3690
3691
3692
3694 continue;
3695
3696
3697 if (MRI.isReserved(Candidate))
3698 continue;
3699
3700 bool IsDef = false;
3701 for (const MachineOperand &MO : Prev->implicit_operands()) {
3702 if (MO.isReg() && MO.isDef() &&
3703 TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
3704 IsDef = true;
3705 break;
3706 }
3707 }
3708
3709 if (IsDef)
3710 continue;
3711
3712 Regs[FoundRegs++] = Candidate;
3713 if (FoundRegs == (unsigned)NumPops)
3714 break;
3715 }
3716
3717 if (FoundRegs == 0)
3718 return false;
3719
3720
3721 while (FoundRegs < (unsigned)NumPops)
3722 Regs[FoundRegs++] = Regs[0];
3723
3724 for (int i = 0; i < NumPops; ++i)
3726 Regs[i]);
3727
3728 return true;
3729}
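// Illustrative rewrite (minsize path, hypothetical epilogue): "addl $8, %esp"
// directly after a call can become "popl %ecx; popl %edx", provided ECX/EDX
// are clobbered by the call's register mask, not reserved, and not implicit
// defs of the call -- two 1-byte pops in place of a 3-byte add.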
3730
3735 unsigned Opcode = I->getOpcode();
3736 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
3737 DebugLoc DL = I->getDebugLoc();
3742
3743
3744
3745
3747 return I;
3748
3749 if (!reserveCallFrame) {
3750
3751
3752
3753
3754
3755
3756
3758
3762
3763
3764
3765
3766
3767
3768
3769
3770 bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
3771
3772 if (HasDwarfEHHandlers && !isDestroy &&
3776
3777 if (Amount == 0)
3778 return I;
3779
3780
3781
3782 Amount -= InternalAmt;
3783
3784
3785
3786
3787 if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
3790
3791
3792 int64_t StackAdjustment = isDestroy ? Amount : -Amount;
3793
3794 if (StackAdjustment) {
3795
3796
3797
3800
3801 if (StackAdjustment) {
3802 if (!(F.hasMinSize() &&
3803 adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
3804 BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
3805 false);
3806 }
3807 }
3808
3810
3811
3812
3813
3814
3815
3816
3817 int64_t CfaAdjustment = -StackAdjustment;
3818
3819
3820 if (CfaAdjustment) {
3824 }
3825 }
3826
3827 return I;
3828 }
3829
3830 if (InternalAmt) {
3833 while (CI != B && !std::prev(CI)->isCall())
3834 --CI;
3835 BuildStackAdjustment(MBB, CI, DL, -InternalAmt, false);
3836 }
3837
3838 return I;
3839}
3840
3845 return true;
3846
3847
3848
3851 if (TLI.hasInlineStackProbe(MF) || TLI.hasStackProbeSymbol(MF))
3852 return false;
3853
3856}
3857
3860
3861
3862
3863
3864
3866 return false;
3867
3868
3869
3873
3875 return true;
3876
3877
3878
3879
3880
3882}
3883
3885
3886
3887 bool CompactUnwind =
3890 !CompactUnwind) &&
3891
3892
3893
3894
3895
3898}
3899
3902 const DebugLoc &DL, bool RestoreSP) const {
3906 "restoring EBP/ESI on non-32-bit target");
3907
3914
3915
3916
3918 int EHRegSize = MFI.getObjectSize(FI);
3919
3920 if (RestoreSP) {
3921
3923 X86::EBP, true, -EHRegSize)
3925 }
3926
3929 int EndOffset = -EHRegOffset - EHRegSize;
3931
3933
3941 assert(EndOffset >= 0 &&
3942 "end of registration object above normal EBP position!");
3943 } else if (UsedReg == BasePtr) {
3944
3948
3953 assert(UsedReg == BasePtr);
3955 UsedReg, true, Offset)
3957 } else {
3958 llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
3959 }
3960 return MBBI;
3961}
3962
3965}
3966
3970}
3971
3979 FrameBase.Kind = DwarfFrameBase::CFA;
3982 return FrameBase;
3983 }
3984
3985 return DwarfFrameBase{DwarfFrameBase::Register, {FrameRegister}};
3986}
3987
3988namespace {
3989
3990struct X86FrameSortingObject {
3991 bool IsValid = false;
3992 unsigned ObjectIndex = 0;
3993 unsigned ObjectSize = 0;
3994 Align ObjectAlignment = Align(1);
3995 unsigned ObjectNumUses = 0;
3996};
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011struct X86FrameSortingComparator {
4012 inline bool operator()(const X86FrameSortingObject &A,
4013 const X86FrameSortingObject &B) const {
4014 uint64_t DensityAScaled, DensityBScaled;
4015
4016
4017
4018
4019 if (!A.IsValid)
4020 return false;
4021 if (!B.IsValid)
4022 return true;
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034 DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
4035 static_cast<uint64_t>(B.ObjectSize);
4036 DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
4037 static_cast<uint64_t>(A.ObjectSize);
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047 if (DensityAScaled == DensityBScaled)
4048 return A.ObjectAlignment < B.ObjectAlignment;
4049
4050 return DensityAScaled < DensityBScaled;
4051 }
4052};
4053}
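// The scaled comparison above is the usual cross-multiplication trick for
// comparing densities without division or floating point:
//   UsesA / SizeA < UsesB / SizeB  <=>  UsesA * SizeB < UsesB * SizeA
// (both sizes are positive and the products fit in uint64_t). For example,
// 10 uses over 40 bytes (density 0.25) sorts after 30 uses over 400 bytes
// (density 0.075), since 10 * 400 = 4000 > 30 * 40 = 1200.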
4054
4055
4056
4057
4058
4062
4063
4064 if (ObjectsToAllocate.empty())
4065 return;
4066
4067
4068
4069
4070
4071
4072 std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());
4073
4074
4075
4076 for (auto &Obj : ObjectsToAllocate) {
4077 SortingObjects[Obj].IsValid = true;
4078 SortingObjects[Obj].ObjectIndex = Obj;
4079 SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);
4080
4082 if (ObjectSize == 0)
4083
4084 SortingObjects[Obj].ObjectSize = 4;
4085 else
4086 SortingObjects[Obj].ObjectSize = ObjectSize;
4087 }
4088
4089
4090 for (auto &MBB : MF) {
4092 if (MI.isDebugInstr())
4093 continue;
4095
4096 if (!MO.isFI())
4097 continue;
4098 int Index = MO.getIndex();
4099
4100
4102 SortingObjects[Index].IsValid)
4103 SortingObjects[Index].ObjectNumUses++;
4104 }
4105 }
4106 }
4107
4108
4109
4111
4112
4113
4114
4115
4116
4117 int i = 0;
4118 for (auto &Obj : SortingObjects) {
4119
4120 if (!Obj.IsValid)
4121 break;
4122 ObjectsToAllocate[i++] = Obj.ObjectIndex;
4123 }
4124
4125
4126 if (!TRI->hasStackRealignment(MF) && hasFP(MF))
4127 std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
4128}
4129
4130unsigned
4132
4133 unsigned Offset = 16;
4134
4136
4138
4139 Offset += getWinEHFuncletFrameSize(MF);
4141}
4142
4145
4146
4148
4149
4150
4153
4154
4155
4159 adjustFrameForMsvcCxxEh(MF);
4160 }
4161}
4162
4163void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {
4164
4165
4166
4167
4168
4171 int64_t MinFixedObjOffset = -SlotSize;
4172 for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
4173 MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
4174
4177 int FrameIndex = H.CatchObj.FrameIndex;
4178 if (FrameIndex != INT_MAX) {
4179
4181 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
4182 MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
4184 }
4185 }
4186 }
4187
4188
4189 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
4190 int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
4191 int UnwindHelpFI =
4194
4195
4196
4201
4204 UnwindHelpFI)
4206}
4207
4211
4214
4215
4217 MI->eraseFromParent();
4219 }
4220}
4221
4224
4225
4226
4231 if (NeedsRestore)
4233 IsSEH);
4234 }
4235}
4236
4237
4238
4241 unsigned NumSpilledRegs) {
4243 unsigned AllocSize = TRI->getSpillSize(*RC) * NumSpilledRegs;
4245 unsigned AlignedSize = alignTo(AllocSize, StackAlign);
4246 return AlignedSize - AllocSize;
4247}
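// Worked example (illustrative): spilling a single 8-byte GPR with a 16-byte
// stack alignment gives AllocSize = 8 and AlignedSize = 16, so an 8-byte gap
// is inserted to keep RSP aligned; spilling both FP and BP (two 8-byte GPRs)
// gives AllocSize = 16 and no gap.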
4248
4249void X86FrameLowering::spillFPBPUsingSP(MachineFunction &MF,
4252 int SPAdjust) const {
4254
4256 DebugLoc DL = BeforeMI->getDebugLoc();
4257
4258
4259 if (FP.isValid()) {
4263 }
4264
4265
4270 }
4271
4272
4273 if (SPAdjust)
4275
4276
4277 if (FP.isValid() && needsDwarfCFI(MF)) {
4278
4279 unsigned CFIIndex =
4281 BuildMI(*MBB, BeforeMI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
4283
4284
4285
4288 int Offset = SPAdjust;
4290 Offset += TRI->getSpillSize(*TRI->getMinimalPhysRegClass(BP));
4291
4292
4293 if (TII.isFrameSetup(*BeforeMI)) {
4295 BeforeMI = std::next(BeforeMI);
4296 }
4300 unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackPtr, true);
4301 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfStackPtr));
4303 CfaExpr.push_back(dwarf::DW_OP_deref);
4304 CfaExpr.push_back(dwarf::DW_OP_consts);
4307
4309 DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
4311 DefCfaExpr.append(CfaExpr.str());
4315 }
4316}
4317
4318void X86FrameLowering::restoreFPBPUsingSP(MachineFunction &MF,
4321 int SPAdjust) const {
4323
4324
4327 DebugLoc DL = AfterMI->getDebugLoc();
4328 if (SPAdjust)
4330
4331
4335 }
4336
4337
4338 if (FP.isValid()) {
4341
4342
4343 if (needsDwarfCFI(MF)) {
4344
4345 unsigned CFIIndex =
4347 BuildMI(*MBB, Pos, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
4349 }
4350 }
4351}
4352
4353void X86FrameLowering::saveAndRestoreFPBPUsingSP(
4356 assert(SpillFP || SpillBP);
4357
4360 unsigned NumRegs = 0;
4361
4362 if (SpillFP) {
4366 RC = TRI->getMinimalPhysRegClass(FP);
4367 ++NumRegs;
4368 }
4369 if (SpillBP) {
4373 RC = TRI->getMinimalPhysRegClass(BP);
4374 ++NumRegs;
4375 }
4377
4378 spillFPBPUsingSP(MF, BeforeMI, FP, BP, SPAdjust);
4379 restoreFPBPUsingSP(MF, AfterMI, FP, BP, SPAdjust);
4380}
4381
4382bool X86FrameLowering::skipSpillFPBP(
4384 if (MI->getOpcode() == X86::LCMPXCHG16B_SAVE_RBX) {
4385
4386
4387
4388
4389
4390 int FI;
4391 unsigned Reg;
4392 while (!(MI->getOpcode() == TargetOpcode::COPY &&
4393 MI->getOperand(1).getReg() == X86::RBX) &&
4395 ++MI;
4396 return true;
4397 }
4398 return false;
4399}
4400
4403 bool &AccessBP) {
4404 AccessFP = AccessBP = false;
4405 if (FP) {
4406 if (MI.findRegisterUseOperandIdx(FP, TRI, false) != -1 ||
4407 MI.findRegisterDefOperandIdx(FP, TRI, false, true) != -1)
4408 AccessFP = true;
4409 }
4410 if (BP) {
4411 if (MI.findRegisterUseOperandIdx(BP, TRI, false) != -1 ||
4412 MI.findRegisterDefOperandIdx(BP, TRI, false, true) != -1)
4413 AccessBP = true;
4414 }
4415 return AccessFP || AccessBP;
4416}
4417
4418
4419
4420
4422 if (!MI.isCall())
4423 return false;
4424 if (InsideEHLabels)
4425 return true;
4426
4429 return false;
4430
4431
4434 if (MBBI->isCall())
4435 return false;
4436 return true;
4437}
4438
4439
4440
4441void X86FrameLowering::checkInterferedAccess(
4444 bool SpillBP) const {
4445 if (DefMI == KillMI)
4446 return;
4448 if (!SpillBP)
4449 return;
4450 } else {
4451 if (!SpillFP)
4452 return;
4453 }
4454
4455 auto MI = KillMI;
4458 [](const MachineOperand &MO) { return MO.isFI(); }))
4460 "Interference usage of base pointer/frame "
4461 "pointer.");
4462 MI++;
4463 }
4464}
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
4490 if (TFI.hasFP(MF))
4494
4495
4496
4500 FP = 0;
4502 BP = 0;
4503 }
4504 if ( && !BP)
4505 return;
4506
4508 bool InsideEHLabels = false;
4512 continue;
4513 MI = *(std::prev(TermMI));
4514
4515 while (MI != ME) {
4516
4517
4518
4521 isInvoke(*MI, InsideEHLabels) || skipSpillFPBP(MF, MI)) {
4522 ++MI;
4523 continue;
4524 }
4525
4526 if (MI->getOpcode() == TargetOpcode::EH_LABEL) {
4527 InsideEHLabels = !InsideEHLabels;
4528 ++MI;
4529 continue;
4530 }
4531
4532 bool AccessFP, AccessBP;
4533
4535 ++MI;
4536 continue;
4537 }
4538
4539
4540
4541 bool FPLive = false, BPLive = false;
4542 bool SpillFP = false, SpillBP = false;
4544 do {
4545 SpillFP |= AccessFP;
4546 SpillBP |= AccessBP;
4547
4548
4549 if (FPLive && MI->findRegisterDefOperandIdx(FP, TRI, false, true) != -1)
4550 FPLive = false;
4551 if (FP && MI->findRegisterUseOperandIdx(FP, TRI, false) != -1)
4552 FPLive = true;
4553 if (BPLive && MI->findRegisterDefOperandIdx(BP, TRI, false, true) != -1)
4554 BPLive = false;
4555 if (BP && MI->findRegisterUseOperandIdx(BP, TRI, false) != -1)
4556 BPLive = true;
4557
4559 } while ((MI != ME) &&
4560 (FPLive || BPLive ||
4562
4563
4564 if (FPLive && !SpillBP)
4565 continue;
4566
4567
4568
4569 if (KillMI->isCall() && DefMI != ME) {
4570 auto FrameSetup = std::next(DefMI);
4571
4572
4573
4574 while (FrameSetup != ME && !TII.isFrameSetup(*FrameSetup) &&
4575 !FrameSetup->isCall())
4576 ++FrameSetup;
4577
4578
4579 if (FrameSetup != ME && TII.isFrameSetup(*FrameSetup) &&
4580 (TII.getFrameSize(*FrameSetup) ||
4582 while (!TII.isFrameInstr(*KillMI))
4583 --KillMI;
4584 DefMI = FrameSetup;
4586 ++MI;
4587 }
4588 }
4589
4590 checkInterferedAccess(MF, DefMI, KillMI, SpillFP, SpillBP);
4591
4592
4593 saveAndRestoreFPBPUsingSP(MF, &(*DefMI), &(*KillMI), SpillFP, SpillBP);
4594 }
4595 }
4596}
const MCRegisterInfo * getRegisterInfo() const
void reportError(SMLoc L, const Twine &Msg)
MCSection * getCompactUnwindSection() const
MCRegAliasIterator enumerates all registers aliasing Reg.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
StringRef getString() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
bool hasEHPadSuccessor() const
bool isEHPad() const
Returns true if the block is a landing pad.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI, MCRegister Reg, const_iterator Before, unsigned Neighborhood=10) const
Return whether (physical) register Reg has been defined and not killed as of just before Before.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< iterator > terminators()
iterator_range< succ_iterator > successors()
reverse_iterator rbegin()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
@ LQR_Live
Register is known to be (at least partially) live.
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool isCleanupFuncletEntry() const
Returns true if this is the entry block of a cleanup funclet.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool needsSplitStackProlog() const
Return true if this function requires a split stack prolog, even if it uses no stack space.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
void setCVBytesOfCalleeSavedRegisters(unsigned S)
int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasCopyImplyingStackAdjustment() const
Returns true if the function contains operations which will lower down to instructions which manipula...
bool hasStackObjects() const
Return true if there are any stack objects in this function.
int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset, bool IsImmutable=false)
Create a spill slot at a fixed location on the stack.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void setStackSize(uint64_t Size)
Set the size of the stack.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
int getObjectIndexBegin() const
Return the minimum frame object index.
void setOffsetAdjustment(int64_t Adj)
Set the correction for frame offsets.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
unsigned addFrameInst(const MCCFIInstruction &Inst)
void setHasWinCFI(bool v)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const std::vector< MCCFIInstruction > & getFrameInstructions() const
Returns a reference to a list of cfi instructions in the function's prologue.
bool hasInlineAsm() const
Returns true if the function contains any inline assembly.
void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)
Create a substitution between one <instr,operand> value to a different, new value.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool callsUnwindInit() const
void push_front(MachineBasicBlock *MBB)
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
bool callsEHReturn() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const std::vector< LandingPadInfo > & getLandingPads() const
Return a reference to the landing pad info for the current function.
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
bool hasEHFunclets() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getNumOperands() const
Retuns the total number of operands.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
bool isLiveIn(Register Reg) const
NamedMDNode * getNamedMetadata(StringRef Name) const
Return the first NamedMDNode in the module with the specified name.
unsigned getCodeViewFlag() const
Returns the CodeView Version by checking module flags.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
MDNode * getOperand(unsigned i) const
unsigned getNumOperands() const
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
Represents a location in source code.
SlotIndex - An opaque wrapper around machine indexes.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
void append(StringRef RHS)
Append from a StringRef.
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
static StackOffset getFixed(int64_t Fixed)
StringRef - Represent a constant reference to a string, i.e.
static constexpr size_t npos
Information about stack frame layout on the target.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
const Triple & getTargetTriple() const
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
SwiftAsyncFramePointerMode SwiftAsyncFramePointer
Control when and how the Swift async frame pointer bit should be set.
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual Register getFrameRegister(const MachineFunction &MF) const =0
Debug information queries.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
bool isOSWindows() const
Tests whether the OS is Windows.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
bool has128ByteRedZone(const MachineFunction &MF) const
Return true if the function has a redzone (accessible bytes past the frame of the top of stack functi...
void spillFPBP(MachineFunction &MF) const override
If a function uses base pointer and the base pointer is clobbered by inline asm, RA doesn't detect th...
bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override
canSimplifyCallFramePseudos - If there is a reserved call frame, the call frame pseudos can be simpli...
bool needsFrameIndexResolution(const MachineFunction &MF) const override
X86FrameLowering(const X86Subtarget &STI, MaybeAlign StackAlignOverride)
const X86RegisterInfo * TRI
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
bool hasFPImpl(const MachineFunction &MF) const override
hasFPImpl - Return true if the specified function should have a dedicated frame pointer register.
MachineBasicBlock::iterator restoreWin32EHStackPointers(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool RestoreSP=false) const
Sets up EBP and optionally ESI based on the incoming EBP value.
int getInitialCFAOffset(const MachineFunction &MF) const override
Return initial CFA offset value i.e.
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
void emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, std::optional< MachineFunction::DebugInstrOperandPair > InstrNum=std::nullopt) const
Emit target stack probe code.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool IsPrologue) const
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
StackOffset getFrameIndexReferenceSP(const MachineFunction &MF, int FI, Register &SPReg, int Adjustment) const
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override
bool enableShrinkWrapping(const MachineFunction &MF) const override
Returns true if the target will correctly handle shrink wrapping.
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - This method should return the base register and offset used to reference a f...
void inlineStackProbe(MachineFunction &MF, MachineBasicBlock &PrologMBB) const override
Replace a StackProbe inline-stub with the actual probe code inline.
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &DL, int64_t NumBytes, bool InEpilogue) const
Emit a series of instructions to increment / decrement the stack pointer by a constant value.
bool canUseAsEpilogue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a epilogue for the target.
bool Is64Bit
Is64Bit implies that x86_64 instructions are available.
Register getInitialCFARegister(const MachineFunction &MF) const override
Return initial CFA register value i.e.
bool Uses64BitFramePtr
True if the 64-bit frame or stack pointer should be used.
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
void adjustForSegmentedStacks(MachineFunction &MF, MachineBasicBlock &PrologueMBB) const override
Adjust the prologue to have the function use segmented stacks.
DwarfFrameBase getDwarfFrameBase(const MachineFunction &MF) const override
Return the frame base information to be encoded in the DWARF subprogram debug info.
void emitCalleeSavedFrameMovesFullCFA(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const override
Emits Dwarf Info specifying offsets of callee saved registers and frame pointer.
int getWin64EHFrameIndexRef(const MachineFunction &MF, int FI, Register &SPReg) const
bool canUseLEAForSPInEpilogue(const MachineFunction &MF) const
Check that LEA can be used on SP in an epilogue sequence for MF.
bool stackProbeFunctionModifiesSP() const override
Does the stack probe function call return with a modified stack pointer?
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const MCCFIInstruction &CFIInst, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Wraps up getting a CFI index and building a MachineInstr for it.
int mergeSPUpdates(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, bool doMergeWithPrevious) const
Check the instruction before/after the passed instruction.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
Same as getFrameIndexReference, except that the stack pointer (as opposed to the frame pointer) will ...
void restoreWinEHStackPointersInParent(MachineFunction &MF) const
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
void adjustForHiPEPrologue(MachineFunction &MF, MachineBasicBlock &PrologueMBB) const override
Erlang programs may need a special prologue to handle the stack size they might need at runtime.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const override
int64_t getFrameAdjustment(const MachineInstr &I) const
Returns the stack pointer adjustment that happens inside the frame setup..destroy sequence (e....
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
bool getForceFramePointer() const
void setPadForPush2Pop2(bool V)
bool isCandidateForPush2Pop2(Register Reg) const
unsigned getArgumentStackSize() const
bool getFPClobberedByCall() const
int getRestoreBasePointerOffset() const
int getSEHFramePtrSaveIndex() const
bool hasCFIAdjustCfa() const
int getTCReturnAddrDelta() const
void setRestoreBasePointer(const MachineFunction *MF)
bool getHasSEHFramePtrSave() const
DenseMap< int, unsigned > & getWinEHXMMSlotInfo()
bool getBPClobberedByCall() const
void setUsesRedZone(bool V)
bool hasPreallocatedCall() const
bool hasSwiftAsyncContext() const
void setHasSEHFramePtrSave(bool V)
bool getRestoreBasePointer() const
MachineInstr * getStackPtrSaveMI() const
size_t getNumCandidatesForPush2Pop2() const
AMXProgModelEnum getAMXProgModel() const
void addCandidateForPush2Pop2(Register Reg)
unsigned getCalleeSavedFrameSize() const
bool getHasPushSequences() const
bool padForPush2Pop2() const
void setStackPtrSaveMI(MachineInstr *MI)
bool getUsesRedZone() const
void setCalleeSavedFrameSize(unsigned bytes)
void setSEHFramePtrSaveIndex(int Index)
bool hasBasePointer(const MachineFunction &MF) const
Register getFrameRegister(const MachineFunction &MF) const override
unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI) const
findDeadCallerSavedReg - Return a caller-saved register that isn't live when it reaches the "return" ...
Register getStackRegister() const
unsigned getSlotSize() const
Register getFramePtr() const
Returns physical register used as frame pointer.
Register getBaseRegister() const
const X86TargetLowering * getTargetLowering() const override
bool isTargetDragonFly() const
bool isTargetWindowsMSVC() const
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
bool isTargetDarwin() const
bool isTargetWin64() const
bool isTarget64BitLP64() const
Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
bool swiftAsyncContextIsDynamicallySet() const
Return whether FrameLowering should always set the "extended frame present" bit in FP,...
bool isTargetWindowsCoreCLR() const
const X86InstrInfo * getInstrInfo() const override
bool isCallingConvWin64(CallingConv::ID CC) const
bool isTargetFreeBSD() const
bool isTargetNaCl64() const
bool isTargetWin32() const
bool useIndirectThunkCalls() const
bool isTargetLinux() const
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
StringRef getStackProbeSymbolName(const MachineFunction &MF) const override
Returns the name of the symbol used to emit stack probes or the empty string if not applicable.
bool hasStackProbeSymbol(const MachineFunction &MF) const override
Returns true if stack probing through a function call is requested.
unsigned getStackProbeSize(const MachineFunction &MF) const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
uint16_t StackAdjustment(const RuntimeFunction &RF)
StackAdjustment - calculated stack adjustment in words.
@ HiPE
Used by the High-Performance Erlang Compiler (HiPE).
@ X86_INTR
x86 hardware interrupt context.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ X86_FastCall
'fast' analog of X86_StdCall.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Reg
All possible values of the reg field in the ModR/M byte.
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
@ DwarfCFI
DWARF-like instruction based exceptions.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
IterT skipDebugInstructionsForward(IterT It, IterT End, bool SkipPseudoOp=true)
Increment It until it points to a non-debug instruction or to End and return the resulting iterator.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
static const MachineInstrBuilder & addRegOffset(const MachineInstrBuilder &MIB, unsigned Reg, bool isKill, int Offset)
addRegOffset - This function is used to add a memory reference of the form [Reg + Offset],...
@ Always
Always set the bit.
@ Never
Never set the bit.
@ DeploymentBased
Determine whether to set the bit statically or dynamically based on the deployment target.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
IterT skipDebugInstructionsBackward(IterT It, IterT Begin, bool SkipPseudoOp=true)
Decrement It until it points to a non-debug instruction or to Begin and return the resulting iterator...
unsigned getUndefRegState(bool B)
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
unsigned encodeSLEB128(int64_t Value, raw_ostream &OS, unsigned PadTo=0)
Utility function to encode a SLEB128 value to an output stream.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
void computeAndAddLiveIns(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB)
Convenience function combining computeLiveIns() and addLiveIns().
unsigned encodeULEB128(uint64_t Value, raw_ostream &OS, unsigned PadTo=0)
Utility function to encode a ULEB128 value to an output stream.
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-in's for a set of MBBs until the computation converges.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Pair of physical register and lane mask.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
union llvm::TargetFrameLowering::DwarfFrameBase::@248 Location
enum llvm::TargetFrameLowering::DwarfFrameBase::FrameBaseKind Kind
SmallVector< WinEHTryBlockMapEntry, 4 > TryBlockMap
SmallVector< WinEHHandlerType, 1 > HandlerArray