LLVM: lib/Target/X86/X86FrameLowering.cpp Source File (original) (raw)
1
2
3
4
5
6
7
8
9
10
11
12
37#include
38
39#define DEBUG_TYPE "x86-fl"
40
41STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
43 "Number of extra stack probes generated in prologue");
44STATISTIC(NumFunctionUsingPush2Pop2, "Number of funtions using push2/pop2");
45
46using namespace llvm;
47
52 STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
53
57
60}
61
66}
67
68
69
70
71
76 (hasFP(MF) && ->hasStackRealignment(MF)) ||
78}
79
80
81
82
83
84
85
86
91}
92
93
94
95
106}
107
109 return IsLP64 ? X86::SUB64ri32 : X86::SUB32ri;
110}
111
113 return IsLP64 ? X86::ADD64ri32 : X86::ADD32ri;
114}
115
117 return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
118}
119
121 return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
122}
123
125 return IsLP64 ? X86::AND64ri32 : X86::AND32ri;
126}
127
129 return IsLP64 ? X86::LEA64r : X86::LEA32r;
130}
131
133 if (Use64BitReg) {
134 if (isUInt<32>(Imm))
135 return X86::MOV32ri64;
136 if (isInt<32>(Imm))
137 return X86::MOV64ri32;
138 return X86::MOV64ri;
139 }
140 return X86::MOV32ri;
141}
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
161 return ST.is64Bit() ? (ST.hasPPX() ? X86::PUSHP64r : X86::PUSH64r)
162 : X86::PUSH32r;
163}
165 return ST.is64Bit() ? (ST.hasPPX() ? X86::POPP64r : X86::POP64r)
166 : X86::POP32r;
167}
169 return ST.hasPPX() ? X86::PUSH2P : X86::PUSH2;
170}
172 return ST.hasPPX() ? X86::POP2P : X86::POP2;
173}
174
178
179 if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
180 Reg == X86::AH || Reg == X86::AL)
181 return true;
182 }
183
184 return false;
185}
186
187
188
189
190
191static bool
194 bool BreakNext = false;
196 if (!MO.isReg())
197 continue;
199 if (Reg != X86::EFLAGS)
200 continue;
201
202
203
204
205 if (!MO.isDef())
206 return true;
207
208
209
210 BreakNext = true;
211 }
212
213 if (BreakNext)
214 return false;
215 }
216
217
218
220 if (Succ->isLiveIn(X86::EFLAGS))
221 return true;
222
223 return false;
224}
225
226
227
230 const DebugLoc &DL, int64_t NumBytes,
231 bool InEpilogue) const {
232 bool isSub = NumBytes < 0;
236
237 uint64_t Chunk = (1LL << 31) - 1;
238
243
244
245
246 if (EmitInlineStackProbe && !InEpilogue) {
247
248
249
251 return;
252 } else if (Offset > Chunk) {
253
254
255 unsigned Reg = 0;
257
259 Reg = Rax;
260 else
263
266 if (Reg) {
268 Reg)
274 MI->getOperand(3).setIsDead();
275 return;
276 } else if (Offset > 8 * Chunk) {
277
278
279
280
281
282
283
288
289
290 if (isSub)
292 else
295 Rax)
301 MI->getOperand(3).setIsDead();
302
306
309 return;
310 }
311 }
312
316
317
318 unsigned Reg = isSub ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
320 if (Reg) {
321 unsigned Opc = isSub ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
322 : (Is64Bit ? X86::POP64r : X86::POP32r);
327 continue;
328 }
329 }
330
331 BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
333
335 }
336}
337
341 assert(Offset != 0 && "zero offset stack adjustment requested");
342
343
344
345 bool UseLEA;
346 if (!InEpilogue) {
347
348
349
350
351 UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
352 } else {
353
354
355
356
357
359 if (UseLEA && .useLeaForSP())
361
362
364 "We shouldn't have allowed this insertion point");
365 }
366
368 if (UseLEA) {
373 } else {
374 bool IsSub = Offset < 0;
381 MI->getOperand(3).setIsDead();
382 }
383 return MI;
384}
385
388 bool doMergeWithPrevious) const {
389 if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
390 (!doMergeWithPrevious && MBBI == MBB.end()))
391 return 0;
392
394
396
397
398
399
400
401
402
403
404
405
406
407 if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
408 PI = std::prev(PI);
409
410 unsigned Opc = PI->getOpcode();
412
413 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD32ri) &&
414 PI->getOperand(0).getReg() == StackPtr) {
416 Offset = PI->getOperand(2).getImm();
417 } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
418 PI->getOperand(0).getReg() == StackPtr &&
419 PI->getOperand(1).getReg() == StackPtr &&
420 PI->getOperand(2).getImm() == 1 &&
421 PI->getOperand(3).getReg() == X86::NoRegister &&
422 PI->getOperand(5).getReg() == X86::NoRegister) {
423
424 Offset = PI->getOperand(4).getImm();
425 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB32ri) &&
426 PI->getOperand(0).getReg() == StackPtr) {
428 Offset = -PI->getOperand(2).getImm();
429 } else
430 return 0;
431
433 if (PI != MBB.end() && PI->isCFIInstruction()) {
439 }
440 if (!doMergeWithPrevious)
442
444}
445
452 unsigned CFIIndex = MF.addFrameInst(CFIInst);
453
456
460}
461
462
463
467 if ((MF)) {
469 return;
470 }
473 const Register MachineFramePtr =
476 unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
477
482}
483
486 const DebugLoc &DL, bool IsPrologue) const {
491
492
494
495
499 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
500
501 if (IsPrologue) {
503
504
505
506
507
510 CfaExpr.push_back(dwarf::DW_CFA_expression);
515 const Register MachineFramePtr =
519 unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);
520 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfFramePtr));
525 } else {
528 }
529 } else {
532 }
533 }
535 int FI = MI->getOperand(1).getIndex();
539 const Register MachineFramePtr =
543 unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);
544 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfFramePtr));
547 CfaExpr.push_back(dwarf::DW_OP_deref);
548
550 DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
552 DefCfaExpr.append(CfaExpr.str());
553
557 }
558}
559
560void X86FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
563
564
566
567
571
572
573
576 if (!X86::RFP80RegClass.contains(Reg))
577 continue;
578
579 unsigned NumFPRegs = ST.is64Bit() ? 8 : 7;
580 for (unsigned i = 0; i != NumFPRegs; ++i)
582
583 for (unsigned i = 0; i != NumFPRegs; ++i)
585 break;
586 }
587
588
591 if (TRI->isGeneralPurposeRegister(MF, Reg)) {
593 RegsToZero.reset(Reg);
594 }
595
596
597 for (MCRegister Reg : GPRsToZero.set_bits())
599
600
603}
604
608 std::optionalMachineFunction::DebugInstrOperandPair InstrNum) const {
611 if (InProlog) {
613 .addImm(0 );
614 } else {
615 emitStackProbeInline(MF, MBB, MBBI, DL, false);
616 }
617 } else {
618 emitStackProbeCall(MF, MBB, MBBI, DL, InProlog, InstrNum);
619 }
620}
621
624}
625
629 return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
630 });
631 if (Where != PrologMBB.end()) {
633 emitStackProbeInline(MF, PrologMBB, Where, DL, true);
634 Where->eraseFromParent();
635 }
636}
637
638void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
642 bool InProlog) const {
645 emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
646 else
647 emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
648}
649
650void X86FrameLowering::emitStackProbeInlineGeneric(
655
659 "different expansion expected for CoreCLR 64 bit");
660
661 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
662 uint64_t ProbeChunk = StackProbeSize * 8;
663
665 TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;
666
667
668
669
670 if (Offset > ProbeChunk) {
671 emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,
672 MaxAlign % StackProbeSize);
673 } else {
674 emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,
675 MaxAlign % StackProbeSize);
676 }
677}
678
679void X86FrameLowering::emitStackProbeInlineGenericBlock(
682 uint64_t AlignOffset) const {
683
684 const bool NeedsDwarfCFI = needsDwarfCFI(MF);
688 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
689 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
690
692
693 assert(AlignOffset < StackProbeSize);
694
695
696 if (StackProbeSize < Offset + AlignOffset) {
697
699 BuildStackAdjustment(MBB, MBBI, DL, -StackAdjustment, false)
701 if (!HasFP && NeedsDwarfCFI) {
705 }
706
712 NumFrameExtraProbe++;
713 CurrentOffset = StackProbeSize - AlignOffset;
714 }
715
716
717
718
719 while (CurrentOffset + StackProbeSize < Offset) {
720 BuildStackAdjustment(MBB, MBBI, DL, -StackProbeSize, false)
722
723 if (!HasFP && NeedsDwarfCFI) {
727 }
733 NumFrameExtraProbe++;
734 CurrentOffset += StackProbeSize;
735 }
736
737
740
741
742 unsigned Reg = Is64Bit ? X86::RAX : X86::EAX;
743 unsigned Opc = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
747 } else {
748 BuildStackAdjustment(MBB, MBBI, DL, -ChunkSize, false)
750 }
751
752
753}
754
755void X86FrameLowering::emitStackProbeInlineGenericLoop(
758 uint64_t AlignOffset) const {
760
763 "Inline stack probe loop will clobber live EFLAGS.");
764
765 const bool NeedsDwarfCFI = needsDwarfCFI(MF);
769 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
770 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
771
772 if (AlignOffset) {
773 if (AlignOffset < StackProbeSize) {
774
775 BuildStackAdjustment(MBB, MBBI, DL, -AlignOffset, false)
777
783 NumFrameExtraProbe++;
784 Offset -= AlignOffset;
785 }
786 }
787
788
789 NumFrameLoopProbe++;
791
794
796 MF.insert(MBBIter, testMBB);
797 MF.insert(MBBIter, tailMBB);
798
801 : X86::EAX;
802
803
804 {
806
807
808
809
810 bool canUseSub =
812
813 if (canUseSub) {
815
820 .addReg(FinalStackProbed)
825 .addImm(-BoundOffset)
828 .addReg(FinalStackProbed)
831 } else {
832
833
834
836 }
837
838
839
840 if (!HasFP && NeedsDwarfCFI) {
841
842
843 const Register DwarfFinalStackProbed =
846 : FinalStackProbed;
847
850 nullptr, TRI->getDwarfRegNum(DwarfFinalStackProbed, true)));
853 }
854 }
855
856
857 BuildStackAdjustment(*testMBB, testMBB->end(), DL, -StackProbeSize,
858 false)
860
861
867
868
871 .addReg(FinalStackProbed)
873
874
881
882
886
887
890 if (TailOffset) {
891 BuildStackAdjustment(*tailMBB, TailMBBIter, DL, -TailOffset,
892 false)
894 }
895
896
897 if (!HasFP && NeedsDwarfCFI) {
898
899
900 const Register DwarfStackPtr =
904
907 nullptr, TRI->getDwarfRegNum(DwarfStackPtr, true)));
908 }
909
910
912}
913
914void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
918 assert(STI.is64Bit() && "different expansion needed for 32 bit");
922
925 "Inline stack probe loop will clobber live EFLAGS.");
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
958
960 MF.insert(MBBIter, RoundMBB);
961 MF.insert(MBBIter, LoopMBB);
962 MF.insert(MBBIter, ContinueMBB);
963
964
968
969
970 const int64_t ThreadEnvironmentStackLimit = 0x10;
971 const int64_t PageSize = 0x1000;
972 const int64_t PageMask = ~(PageSize - 1);
973
974
975
979 SizeReg = InProlog ? X86::RAX : MRI.createVirtualRegister(RegClass),
980 ZeroReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
981 CopyReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
982 TestReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
983 FinalReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
984 RoundedReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
985 LimitReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
986 JoinReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
987 ProbeReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass);
988
989
990 int64_t RCXShadowSlot = 0;
991 int64_t RDXShadowSlot = 0;
992
993
994 if (InProlog) {
995
996
997
1001
1002
1003
1004
1005 const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
1006 const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
1007 int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
1008
1009
1010 if (IsRCXLiveIn)
1011 RCXShadowSlot = InitSlot;
1012 if (IsRDXLiveIn)
1013 RDXShadowSlot = InitSlot;
1014 if (IsRDXLiveIn && IsRCXLiveIn)
1015 RDXShadowSlot += 8;
1016
1017 if (IsRCXLiveIn)
1019 RCXShadowSlot)
1021 if (IsRDXLiveIn)
1023 RDXShadowSlot)
1025 } else {
1026
1028 }
1029
1030
1031
1043
1044
1045
1046
1047
1048
1049
1050
1055 .addImm(ThreadEnvironmentStackLimit)
1058
1060 .addMBB(ContinueMBB)
1062
1063
1064 if (InProlog)
1066 BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
1070
1071
1072
1073
1074 if (!InProlog) {
1075 BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
1080 }
1081
1082 if (InProlog)
1086
1087
1095
1096 if (InProlog)
1104
1106
1107
1108 if (InProlog) {
1109 if (RCXShadowSlot)
1111 TII.get(X86::MOV64rm), X86::RCX),
1112 X86::RSP, false, RCXShadowSlot);
1113 if (RDXShadowSlot)
1115 TII.get(X86::MOV64rm), X86::RDX),
1116 X86::RSP, false, RDXShadowSlot);
1117 }
1118
1119
1120
1121 BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
1124
1125
1131
1132 if (InProlog) {
1135 }
1136
1137
1138 if (InProlog) {
1139 for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
1141 }
1144 }
1147 }
1151 }
1152 }
1153}
1154
1155void X86FrameLowering::emitStackProbeCall(
1158 std::optionalMachineFunction::DebugInstrOperandPair InstrNum) const {
1160
1161
1163 report_fatal_error("Emitting stack probe calls on 64-bit with the large "
1164 "code model and indirect thunks not yet implemented.");
1165
1168 "Stack probe calls will clobber live EFLAGS.");
1169
1170 unsigned CallOp;
1172 CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
1173 else
1174 CallOp = X86::CALLpcrel32;
1175
1177
1180
1181
1182
1184
1185
1189 } else {
1192 }
1193
1201
1204
1205
1206
1207
1208
1209
1210 ModInst =
1214 }
1215
1216
1217
1218
1219 if (InstrNum) {
1221
1224 } else {
1225
1226
1227 unsigned SPDefOperand = ModInst->getNumOperands() - 2;
1230 }
1231 }
1232
1233 if (InProlog) {
1234
1235 for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
1237 }
1238}
1239
1241
1242
1243 const uint64_t Win64MaxSEHOffset = 128;
1244 uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
1245
1246 return SEHFrameOffset & -16;
1247}
1248
1249
1250
1251
1252
1254X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
1256 Align MaxAlign = MFI.getMaxAlign();
1259 if (HasRealign) {
1261 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
1262 else if (MaxAlign < SlotSize)
1264 }
1265
1267 if (HasRealign)
1268 MaxAlign = (MaxAlign > 16) ? MaxAlign : Align(16);
1269 else
1270 MaxAlign = Align(16);
1271 }
1272 return MaxAlign.value();
1273}
1274
1281
1285 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
1286 const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);
1287
1288
1289
1290
1291 if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
1292 {
1293 NumFrameLoopProbe++;
1302
1304 MF.insert(MBBIter, entryMBB);
1305 MF.insert(MBBIter, headMBB);
1306 MF.insert(MBBIter, bodyMBB);
1307 MF.insert(MBBIter, footMBB);
1308 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
1311 : X86::EAX;
1312
1313
1314 {
1315
1317 BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
1321 BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
1322 .addReg(FinalStackProbed)
1325
1326
1327 MI->getOperand(3).setIsDead();
1328
1331 .addReg(FinalStackProbed)
1340 }
1341
1342
1343
1344 {
1348 .addImm(StackProbeSize)
1350
1354 .addReg(FinalStackProbed)
1356
1357
1362
1365 }
1366
1367
1368 {
1374
1378 .addImm(StackProbeSize)
1380
1381
1384 .addReg(FinalStackProbed)
1387
1388
1395 }
1396
1397
1398 {
1400 .addReg(FinalStackProbed)
1408 }
1409
1411 }
1412 } else {
1417
1418
1419 MI->getOperand(3).setIsDead();
1420 }
1421}
1422
1424
1425
1427 "MF used frame lowering for wrong subtarget");
1431}
1432
1433
1434
1435
1436bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {
1438}
1439
1440bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {
1442}
1443
1444
1446 switch (Opcode) {
1447 case X86::REPNE_PREFIX:
1448 case X86::REP_MOVSB_32:
1449 case X86::REP_MOVSB_64:
1450 case X86::REP_MOVSD_32:
1451 case X86::REP_MOVSD_64:
1452 case X86::REP_MOVSQ_32:
1453 case X86::REP_MOVSQ_64:
1454 case X86::REP_MOVSW_32:
1455 case X86::REP_MOVSW_64:
1456 case X86::REP_PREFIX:
1457 case X86::REP_STOSB_32:
1458 case X86::REP_STOSB_64:
1459 case X86::REP_STOSD_32:
1460 case X86::REP_STOSD_64:
1461 case X86::REP_STOSQ_32:
1462 case X86::REP_STOSQ_64:
1463 case X86::REP_STOSW_32:
1464 case X86::REP_STOSW_64:
1465 return true;
1466 default:
1467 break;
1468 }
1469 return false;
1470}
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1560 "MF used frame lowering for wrong subtarget");
1565 uint64_t MaxAlign = calculateMaxStackAlign(MF);
1571 bool FnHasClrFunclet =
1573 bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
1574 bool HasFP = hasFP(MF);
1575 bool IsWin64Prologue = isWin64Prologue(MF);
1577
1580 bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
1581 bool NeedsDwarfCFI = needsDwarfCFI(MF);
1583 const Register MachineFramePtr =
1587 bool HasWinCFI = false;
1588
1589
1590
1593
1594
1596
1597
1598 ArgBaseReg = MI->getOperand(0).getReg();
1599
1600
1601
1602
1604 ArgBaseReg)
1607 .addUse(X86::NoRegister)
1609 .addUse(X86::NoRegister)
1611 if (NeedsDwarfCFI) {
1612
1613 unsigned DwarfStackPtr = TRI->getDwarfRegNum(ArgBaseReg, true);
1617 }
1623 .addReg(X86::NoRegister)
1625 .addReg(X86::NoRegister)
1627 }
1628
1629
1630
1632 if (TailCallArgReserveSize && IsWin64Prologue)
1633 report_fatal_error("Can't handle guaranteed tail call under win64 yet");
1634
1635 const bool EmitStackProbeCall =
1638
1643
1644
1646 .addUse(MachineFramePtr)
1649 .addUse(X86::NoRegister)
1652 .addUse(X86::NoRegister);
1653 break;
1654 }
1655 [[fallthrough]];
1656
1659 !IsWin64Prologue &&
1660 "win64 prologue does not set the bit 60 in the saved frame pointer");
1662 .addUse(MachineFramePtr)
1665 break;
1666
1668 break;
1669 }
1670 }
1671
1672
1673
1674
1677 StackSize += 8;
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1692 }
1693
1694
1695
1696
1697
1698
1702 !EmitStackProbeCall &&
1707 if (HasFP)
1709 X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
1710 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
1712 }
1713
1714
1715
1716
1717 if (TailCallArgReserveSize != 0) {
1718 BuildStackAdjustment(MBB, MBBI, DL, -(int)TailCallArgReserveSize,
1719 false)
1721 }
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1738 int stackGrowth = -SlotSize;
1739
1740
1741 Register Establisher = X86::NoRegister;
1742 if (IsClrFunclet)
1744 else if (IsFunclet)
1746
1747 if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
1748
1749
1750
1751 unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1753 .addReg(Establisher)
1756 }
1757
1758 if (HasFP) {
1760
1761
1763 NumBytes =
1765
1766
1767 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
1768 NumBytes = alignTo(NumBytes, MaxAlign);
1769
1770
1775
1776 if (NeedsDwarfCFI && !ArgBaseReg.isValid()) {
1777
1778
1782 nullptr, -2 * stackGrowth + (int)TailCallArgReserveSize),
1784
1785
1786 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1789 2 * stackGrowth -
1790 (int)TailCallArgReserveSize),
1792 }
1793
1794 if (NeedsWinCFI) {
1795 HasWinCFI = true;
1799 }
1800
1801 if (!IsFunclet) {
1803 assert(!IsWin64Prologue &&
1804 "win64 prologue does not store async context right below rbp");
1806
1807
1808
1809
1810 if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
1811
1812
1817 } else {
1818
1819
1823 }
1824
1825 if (NeedsWinCFI) {
1826 HasWinCFI = true;
1830 }
1831
1835 .addUse(X86::NoRegister)
1837 .addUse(X86::NoRegister)
1843 }
1844
1845 if (!IsWin64Prologue && !IsFunclet) {
1846
1853
1854 if (NeedsDwarfCFI) {
1855 if (ArgBaseReg.isValid()) {
1857 CfaExpr.push_back(dwarf::DW_CFA_expression);
1859 unsigned DwarfReg = TRI->getDwarfRegNum(MachineFramePtr, true);
1862 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
1864
1868 } else {
1869
1870
1871 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1876 }
1877 }
1878
1879 if (NeedsWinFPO) {
1880
1881 HasWinCFI = true;
1886 }
1887 }
1888 }
1889 } else {
1890 assert(!IsFunclet && "funclets without FPs not yet implemented");
1891 NumBytes =
1893 }
1894
1895
1896
1897 if (!IsFunclet) {
1898 if (HasFP && TRI->hasStackRealignment(MF))
1900 else
1902 }
1903
1904
1905
1906 unsigned ParentFrameNumBytes = NumBytes;
1907 if (IsFunclet)
1908 NumBytes = getWinEHFuncletFrameSize(MF);
1909
1910
1911 bool PushedRegs = false;
1916 return false;
1917 unsigned Opc = MBBI->getOpcode();
1918 return Opc == X86::PUSH32r || Opc == X86::PUSH64r || Opc == X86::PUSHP64r ||
1919 Opc == X86::PUSH2 || Opc == X86::PUSH2P;
1920 };
1921
1922 while (IsCSPush(MBBI)) {
1923 PushedRegs = true;
1924 Register Reg = MBBI->getOperand(0).getReg();
1925 LastCSPush = MBBI;
1927 unsigned Opc = LastCSPush->getOpcode();
1928
1929 if (!HasFP && NeedsDwarfCFI) {
1930
1931
1933
1934
1935 if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)
1941 }
1942
1943 if (NeedsWinCFI) {
1944 HasWinCFI = true;
1948 if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)
1950 .addImm(LastCSPush->getOperand(1).getReg())
1952 }
1953 }
1954
1955
1956
1957
1958 if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF) &&
1959 !ArgBaseReg.isValid()) {
1960 assert(HasFP && "There should be a frame pointer if stack is realigned.");
1962
1963 if (NeedsWinCFI) {
1964 HasWinCFI = true;
1968 }
1969 }
1970
1971
1972
1973
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986 uint64_t AlignedNumBytes = NumBytes;
1987 if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))
1988 AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
1989 if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
1991 "The Red Zone is not accounted for in stack probes");
1992
1993
1995
1996 if (isEAXAlive) {
1998
2002 } else {
2003
2007 }
2008 }
2009
2011
2012
2013 int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
2017 } else {
2018
2019
2021 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
2023 }
2024
2025
2027
2028 if (isEAXAlive) {
2029
2033 StackPtr, false, NumBytes - 8);
2034 else
2036 StackPtr, false, NumBytes - 4);
2039 }
2040 } else if (NumBytes) {
2042 }
2043
2044 if (NeedsWinCFI && NumBytes) {
2045 HasWinCFI = true;
2049 }
2050
2051 int SEHFrameOffset = 0;
2052 unsigned SPOrEstablisher;
2053 if (IsFunclet) {
2054 if (IsClrFunclet) {
2055
2056
2057
2058
2059 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
2063 Establisher, false, PSPSlotOffset)
2066 ;
2067
2068
2070 false, PSPSlotOffset)
2071 .addReg(Establisher)
2073 NoInfo,
2076 }
2077 SPOrEstablisher = Establisher;
2078 } else {
2080 }
2081
2082 if (IsWin64Prologue && HasFP) {
2083
2084
2085
2087 if (SEHFrameOffset)
2089 SPOrEstablisher, false, SEHFrameOffset);
2090 else
2092 .addReg(SPOrEstablisher);
2093
2094
2095 if (NeedsWinCFI && !IsFunclet) {
2096 assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
2097 HasWinCFI = true;
2100 .addImm(SEHFrameOffset)
2104 }
2105 } else if (IsFunclet && STI.is32Bit()) {
2106
2108
2109
2115
2117 false, EHRegOffset)
2119 }
2120 }
2121
2125
2126 if (NeedsWinCFI) {
2127 int FI;
2129 if (X86::FR64RegClass.contains(Reg)) {
2132 if (IsWin64Prologue && IsFunclet)
2134 else
2137 SEHFrameOffset;
2138
2139 HasWinCFI = true;
2140 assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
2145 }
2146 }
2147 }
2148 }
2149
2150 if (NeedsWinCFI && HasWinCFI)
2153
2154 if (FnHasClrFunclet && !IsFunclet) {
2155
2156
2157
2158 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
2162 PSPSlotOffset)
2167 }
2168
2169
2170
2171
2172 if (IsWin64Prologue && TRI->hasStackRealignment(MF)) {
2173 assert(HasFP && "There should be a frame pointer if stack is realigned.");
2174 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
2175 }
2176
2177
2178 if (IsFunclet && STI.is32Bit())
2179 return;
2180
2181
2182
2183
2184
2186
2189 .addReg(SPOrEstablisher)
2192
2193
2197 .addReg(SPOrEstablisher)
2199 }
2200
2202
2203
2204
2205
2211 assert(UsedReg == BasePtr);
2215 }
2216 }
2217 if (ArgBaseReg.isValid()) {
2218
2220 int FI = MI->getOperand(1).getIndex();
2221 unsigned MOVmr = Is64Bit ? X86::MOV64mr : X86::MOV32mr;
2222
2226 }
2227
2228 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
2229
2230 if (!HasFP && NumBytes) {
2231
2237 }
2238
2239
2241 }
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2256 bool NeedsCLD = false;
2257
2260 if (MI.isCall()) {
2261 NeedsCLD = true;
2262 break;
2263 }
2264
2266 NeedsCLD = true;
2267 break;
2268 }
2269
2270 if (MI.isInlineAsm()) {
2271
2272
2273
2274 NeedsCLD = true;
2275 break;
2276 }
2277 }
2278 }
2279
2280 if (NeedsCLD) {
2283 }
2284 }
2285
2286
2288}
2289
2292
2293
2294
2295
2296
2297
2299}
2300
2302 switch (MI.getOpcode()) {
2303 case X86::CATCHRET:
2304 case X86::CLEANUPRET:
2305 return true;
2306 default:
2307 return false;
2308 }
2310}
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325unsigned
2326X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
2330 true)
2333 return static_cast<unsigned>(Offset);
2334}
2335
2336unsigned
2337X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
2339
2341
2343 unsigned XMMSize =
2344 WinEHXMMSlotInfo.size() * TRI->getSpillSize(X86::VR128RegClass);
2345
2346 unsigned UsedSize;
2350
2351
2352
2353 UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
2354 } else {
2355
2357 }
2358
2359
2360
2362
2363
2364 return FrameSizeMinusRBP + XMMSize - CSSize;
2365}
2366
2368 return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
2369 Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||
2370 Opc == X86::TCRETURNdi64 || Opc == X86::TCRETURNmi64;
2371}
2372
2381 DL = MBBI->getDebugLoc();
2382
2387
2389 bool NeedsWin64CFI =
2392
2393
2395 uint64_t MaxAlign = calculateMaxStackAlign(MF);
2398 bool HasFP = hasFP(MF);
2400
2404
2407 unsigned Opc = X86::LEA32r;
2408 Register StackReg = X86::ESP;
2409 ArgBaseReg = MI->getOperand(0).getReg();
2410 if (STI.is64Bit()) {
2411 Opc = X86::LEA64r;
2412 StackReg = X86::RSP;
2413 }
2414
2415
2419 .addUse(X86::NoRegister)
2421 .addUse(X86::NoRegister)
2423 if (NeedsDwarfCFI) {
2424 unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackReg, true);
2429 }
2431 }
2432
2433 if (IsFunclet) {
2434 assert(HasFP && "EH funclets without FP not yet implemented");
2435 NumBytes = getWinEHFuncletFrameSize(MF);
2436 } else if (HasFP) {
2437
2439 NumBytes = FrameSize - CSSize - TailCallArgReserveSize;
2440
2441
2442
2443 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
2444 NumBytes = alignTo(FrameSize, MaxAlign);
2445 } else {
2446 NumBytes = StackSize - CSSize - TailCallArgReserveSize;
2447 }
2448 uint64_t SEHStackAllocAmt = NumBytes;
2449
2450
2452 if (HasFP) {
2454
2457 }
2458
2461 MachineFramePtr)
2463
2464
2465
2468 .addUse(MachineFramePtr)
2471 }
2472
2473 if (NeedsDwarfCFI) {
2474 if (!ArgBaseReg.isValid()) {
2475 unsigned DwarfStackPtr =
2476 TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
2480 }
2482 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
2487 --AfterPop;
2488 }
2490 }
2491 }
2492
2494
2497 unsigned Opc = PI->getOpcode();
2498
2499 if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
2501 (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::BTR64ri8 &&
2502 Opc != X86::ADD64ri32 && Opc != X86::POPP64r && Opc != X86::POP2 &&
2503 Opc != X86::POP2P && Opc != X86::LEA64r))
2504 break;
2505 FirstCSPop = PI;
2506 }
2507
2509 }
2510 if (ArgBaseReg.isValid()) {
2511
2513 int FI = MI->getOperand(1).getIndex();
2514 unsigned MOVrm = Is64Bit ? X86::MOV64rm : X86::MOV32rm;
2515
2518 }
2519 MBBI = FirstCSPop;
2520
2521 if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
2522 emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
2523
2525 DL = MBBI->getDebugLoc();
2526
2527
2530
2531
2532
2533
2534
2536 !IsFunclet) {
2537 if (TRI->hasStackRealignment(MF))
2538 MBBI = FirstCSPop;
2541 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
2542
2544 LEAAmount -= 16;
2545
2546
2547
2548
2549
2550
2551
2552
2553 if (LEAAmount != 0) {
2556 false, LEAAmount);
2558 } else {
2559 unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
2562 }
2563 } else if (NumBytes) {
2564
2566 if (!HasFP && NeedsDwarfCFI) {
2567
2570 nullptr, CSSize + TailCallArgReserveSize + SlotSize),
2572 }
2574 }
2575
2576
2577
2578
2579
2580
2581
2582 if (NeedsWin64CFI && MF.hasWinCFI())
2584
2585 if (!HasFP && NeedsDwarfCFI) {
2586 MBBI = FirstCSPop;
2588
2589
2592 unsigned Opc = PI->getOpcode();
2594 if (Opc == X86::POP32r || Opc == X86::POP64r || Opc == X86::POPP64r ||
2595 Opc == X86::POP2 || Opc == X86::POP2P) {
2597
2598
2599 if (Opc == X86::POP2 || Opc == X86::POP2P)
2604 }
2605 }
2606 }
2607
2608
2609
2610
2613
2615
2617 assert(Offset >= 0 && "TCDelta should never be positive");
2619
2622 }
2623 }
2624
2625
2627 BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));
2628}
2629
2631 int FI,
2634
2636
2637
2638
2641 else if (TRI->hasStackRealignment(MF))
2643 else
2645
2646
2647
2648
2649
2655 int64_t FPDelta = 0;
2656
2657
2658
2659
2660
2664 }
2665
2666 if (IsWin64Prologue) {
2668
2669
2671
2672
2675 uint64_t NumBytes = FrameSize - CSSize;
2676
2678 if (FI && FI == X86FI->getFAIndex())
2680
2681
2682
2683
2684
2685 FPDelta = FrameSize - SEHFrameOffset;
2687 "FPDelta isn't aligned per the Win64 ABI!");
2688 }
2689
2691
2693
2694
2696
2697
2699 if (TailCallReturnAddrDelta < 0)
2700 Offset -= TailCallReturnAddrDelta;
2701
2703 }
2704
2705
2706
2707
2711}
2712
2718 const auto it = WinEHXMMSlotInfo.find(FI);
2719
2720 if (it == WinEHXMMSlotInfo.end())
2722
2725 it->second;
2726}
2727
2731 int Adjustment) const {
2736}
2737
2741 bool IgnoreSPUpdates) const {
2742
2744
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2782
2783
2784
2785
2788
2789
2791 "we don't handle this case!");
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2819}
2820
2823 std::vector &CSI) const {
2826
2827 unsigned CalleeSavedFrameSize = 0;
2828 unsigned XMMCalleeSavedFrameSize = 0;
2831
2833
2834 if (TailCallReturnAddrDelta < 0) {
2835
2836
2837
2838
2839
2840
2841
2842
2843
2845 TailCallReturnAddrDelta - SlotSize, true);
2846 }
2847
2848
2849 if (this->TRI->hasBasePointer(MF)) {
2850
2855 }
2856 }
2857
2858 if (hasFP(MF)) {
2859
2860 SpillSlotOffset -= SlotSize;
2862
2863
2864
2866 SpillSlotOffset -= SlotSize;
2868 SpillSlotOffset -= SlotSize;
2869 }
2870
2871
2872
2873
2875 for (unsigned i = 0; i < CSI.size(); ++i) {
2876 if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
2877 CSI.erase(CSI.begin() + i);
2878 break;
2879 }
2880 }
2881 }
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892 unsigned NumRegsForPush2 = 0;
2893 if (STI.hasPush2Pop2()) {
2895 return X86::GR64RegClass.contains(I.getReg());
2896 });
2897 bool NeedPadding = (SpillSlotOffset % 16 != 0) && (NumCSGPR % 2 == 0);
2898 bool UsePush2Pop2 = NeedPadding ? NumCSGPR > 2 : NumCSGPR > 1;
2900 NumRegsForPush2 = UsePush2Pop2 ? alignDown(NumCSGPR, 2) : 0;
2902 SpillSlotOffset -= SlotSize;
2904 }
2905 }
2906
2907
2910
2911 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2912 continue;
2913
2914
2915
2917 (SpillSlotOffset % 16 == 0 ||
2920
2921 SpillSlotOffset -= SlotSize;
2922 CalleeSavedFrameSize += SlotSize;
2923
2926 }
2927
2928
2929
2931 SpillSlotOffset -= SlotSize;
2932 CalleeSavedFrameSize += SlotSize;
2933
2935
2937 }
2939 "Expect even candidates for push2/pop2");
2941 ++NumFunctionUsingPush2Pop2;
2944
2945
2948 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2949 continue;
2950
2951
2952 MVT VT = MVT::Other;
2953 if (X86::VK16RegClass.contains(Reg))
2954 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2955
2957 unsigned Size = TRI->getSpillSize(*RC);
2958 Align Alignment = TRI->getSpillAlign(*RC);
2959
2960 assert(SpillSlotOffset < 0 && "SpillSlotOffset should always < 0 on X86");
2961 SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);
2962
2963
2964 SpillSlotOffset -= Size;
2968
2969
2970 if (X86::VR128RegClass.contains(Reg)) {
2971 WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
2972 XMMCalleeSavedFrameSize += Size;
2973 }
2974 }
2975
2976 return true;
2977}
2978
2983
2984
2985
2987 return true;
2988
2989
2994
2995
2996
2997 auto UpdateLiveInCheckCanKill = [&](Register Reg) {
2999
3000
3001
3002
3003
3004 if (MRI.isLiveIn(Reg))
3005 return false;
3007
3009 if (MRI.isLiveIn(*AReg))
3010 return false;
3011 return true;
3012 };
3013 auto UpdateLiveInGetKillRegState = [&](Register Reg) {
3015 };
3016
3017 for (auto RI = CSI.rbegin(), RE = CSI.rend(); RI != RE; ++RI) {
3018 Register Reg = RI->getReg();
3019 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
3020 continue;
3021
3025 .addReg(Reg, UpdateLiveInGetKillRegState(Reg))
3026 .addReg(Reg2, UpdateLiveInGetKillRegState(Reg2))
3028 } else {
3030 .addReg(Reg, UpdateLiveInGetKillRegState(Reg))
3032 }
3033 }
3034
3036 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
3037 Register BaseReg = this->TRI->getBaseRegister();
3041 }
3042
3043
3044
3047 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
3048 continue;
3049
3050
3051 MVT VT = MVT::Other;
3052 if (X86::VK16RegClass.contains(Reg))
3053 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
3054
3055
3058
3061 --MI;
3063 ++MI;
3064 }
3065
3066 return true;
3067}
3068
3072
3075 "SEH should not use CATCHRET");
3078
3079
3080 if (STI.is64Bit()) {
3081
3086 .addMBB(CatchRetTarget)
3088 } else {
3089
3091 .addMBB(CatchRetTarget);
3092 }
3093
3094
3095
3097}
3098
3102 if (CSI.empty())
3103 return false;
3104
3106
3107
3108 if (STI.is32Bit())
3109 return true;
3110
3111
3112 if (MI->getOpcode() == X86::CATCHRET) {
3116 if (IsSEH)
3117 return true;
3118 }
3119 }
3120
3122
3123
3126 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
3127 continue;
3128
3129
3130 MVT VT = MVT::Other;
3131 if (X86::VK16RegClass.contains(Reg))
3132 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
3133
3137 }
3138
3139
3143 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
3144 Register BaseReg = this->TRI->getBaseRegister();
3147 }
3148
3149
3150 for (auto I = CSI.begin(), E = CSI.end(); I != E; ++I) {
3152 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
3153 continue;
3154
3159 else
3162 }
3165
3166 return true;
3167}
3168
3173
3174
3179 SavedRegs.set(BasePtr);
3180 }
3181}
3182
3186 I++) {
3187 if (I->hasNestAttr() && ->use_empty())
3188 return true;
3189 }
3190 return false;
3191}
3192
3193
3194
3195
3196
3200
3201
3203 if (Is64Bit)
3204 return Primary ? X86::R14 : X86::R13;
3205 else
3206 return Primary ? X86::EBX : X86::EDI;
3207 }
3208
3209 if (Is64Bit) {
3210 if (IsLP64)
3211 return Primary ? X86::R11 : X86::R12;
3212 else
3213 return Primary ? X86::R11D : X86::R12D;
3214 }
3215
3217
3221 if (IsNested)
3222 report_fatal_error("Segmented stacks does not support fastcall with "
3223 "nested function.");
3224 return Primary ? X86::EAX : X86::ECX;
3225 }
3226 if (IsNested)
3227 return Primary ? X86::EDX : X86::EAX;
3228 return Primary ? X86::ECX : X86::EAX;
3229}
3230
3231
3232
3234
3239 unsigned TlsReg, TlsOffset;
3241
3242
3243
3244 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
3245
3248 "Scratch register is live-in");
3249
3251 report_fatal_error("Segmented stacks do not support vararg functions.");
3255 report_fatal_error("Segmented stacks not supported on this platform.");
3256
3257
3258
3259
3261
3263 return;
3264
3268 bool IsNested = false;
3269
3270
3273
3274
3275
3276
3277 for (const auto &LI : PrologueMBB.liveins()) {
3280 }
3281
3282 if (IsNested)
3284
3287
3288
3289
3291
3292
3295 TlsReg = X86::FS;
3296 TlsOffset = IsLP64 ? 0x70 : 0x40;
3298 TlsReg = X86::GS;
3299 TlsOffset = 0x60 + 90 * 8;
3301 TlsReg = X86::GS;
3302 TlsOffset = 0x28;
3304 TlsReg = X86::FS;
3305 TlsOffset = 0x18;
3307 TlsReg = X86::FS;
3308 TlsOffset = 0x20;
3309 } else {
3310 report_fatal_error("Segmented stacks not supported on this platform.");
3311 }
3312
3313 if (CompareStackPointer)
3314 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
3315 else
3316 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r),
3317 ScratchReg)
3323
3324 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm))
3331 } else {
3333 TlsReg = X86::GS;
3334 TlsOffset = 0x30;
3336 TlsReg = X86::GS;
3337 TlsOffset = 0x48 + 90 * 4;
3339 TlsReg = X86::FS;
3340 TlsOffset = 0x14;
3342 TlsReg = X86::FS;
3343 TlsOffset = 0x10;
3345 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
3346 } else {
3347 report_fatal_error("Segmented stacks not supported on this platform.");
3348 }
3349
3350 if (CompareStackPointer)
3351 ScratchReg = X86::ESP;
3352 else
3353 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg)
3359
3362 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
3370
3371
3372 unsigned ScratchReg2;
3373 bool SaveScratch2;
3374 if (CompareStackPointer) {
3375
3377 SaveScratch2 = false;
3378 } else {
3379
3381
3382
3383
3385 }
3386
3387
3389 "Scratch register is live-in and not saved");
3390
3391 if (SaveScratch2)
3392 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
3394
3395 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
3397 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
3399 .addReg(ScratchReg2)
3404
3405 if (SaveScratch2)
3406 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
3407 }
3408 }
3409
3410
3411
3413 .addMBB(&PrologueMBB)
3415
3416
3417
3419
3420
3421
3422 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
3423 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
3424 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
3425 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
3426
3427 if (IsNested)
3429
3434 Reg11)
3436 } else {
3437 BuildMI(allocMBB, DL, TII.get(X86::PUSH32i))
3440 }
3441
3442
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3460 report_fatal_error("Emitting morestack calls on 64-bit with the large "
3461 "code model and thunks not yet implemented.");
3462 BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
3468 } else {
3470 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
3472 else
3473 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
3475 }
3476
3477 if (IsNested)
3478 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
3479 else
3480 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
3481
3483
3486
3487#ifdef EXPENSIVE_CHECKS
3489#endif
3490}
3491
3492
3493
3494
3495
3498 for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
3500 if (Node->getNumOperands() != 2)
3501 continue;
3502 MDString *NodeName = dyn_cast(Node->getOperand(0));
3503 ValueAsMetadata *NodeVal = dyn_cast(Node->getOperand(1));
3504 if (!NodeName || !NodeVal)
3505 continue;
3506 ConstantInt *ValConst = dyn_cast_or_null(NodeVal->getValue());
3507 if (ValConst && NodeName->getString() == LiteralName) {
3509 }
3510 }
3511
3513 " required but not provided");
3514}
3515
3516
3517
3522 [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) &&
3524 return MI.isMetaInstruction();
3525 });
3526}
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3547
3548
3549
3550 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
3551
3552
3555 if (!HiPELiteralsMD)
3557 "Can't generate HiPE prologue without runtime parameters");
3559 HiPELiteralsMD, Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
3560 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
3561 const unsigned Guaranteed = HipeLeafWords * SlotSize;
3562 unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs
3564 : 0;
3566
3568 "HiPE prologue is only supported on Linux operating systems.");
3569
3570
3571
3572
3573
3574
3575
3576
3578 unsigned MoreStackForCalls = 0;
3579
3580 for (auto &MBB : MF) {
3582 if (.isCall())
3583 continue;
3584
3585
3587
3588
3590 continue;
3591
3593 if ()
3594 continue;
3595
3596
3597
3598
3599
3600
3601 if (F->getName().contains("erlang.") || F->getName().contains("bif_") ||
3603 continue;
3604
3605 unsigned CalleeStkArity = F->arg_size() > CCRegisteredArgs
3606 ? F->arg_size() - CCRegisteredArgs
3607 : 0;
3608 if (HipeLeafWords - 1 > CalleeStkArity)
3609 MoreStackForCalls =
3610 std::max(MoreStackForCalls,
3611 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
3612 }
3613 }
3614 MaxStack += MoreStackForCalls;
3615 }
3616
3617
3618
3619 if (MaxStack > Guaranteed) {
3622
3623 for (const auto &LI : PrologueMBB.liveins()) {
3626 }
3627
3630
3631 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
3632 unsigned LEAop, CMPop, CALLop;
3633 SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
3635 SPReg = X86::RSP;
3636 PReg = X86::RBP;
3637 LEAop = X86::LEA64r;
3638 CMPop = X86::CMP64rm;
3639 CALLop = X86::CALL64pcrel32;
3640 } else {
3641 SPReg = X86::ESP;
3642 PReg = X86::EBP;
3643 LEAop = X86::LEA32r;
3644 CMPop = X86::CMP32rm;
3645 CALLop = X86::CALLpcrel32;
3646 }
3647
3650 "HiPE prologue scratch register is live-in");
3651
3652
3654 false, -MaxStack);
3655
3657 PReg, false, SPLimitOffset);
3658 BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1))
3659 .addMBB(&PrologueMBB)
3661
3662
3665 false, -MaxStack);
3667 PReg, false, SPLimitOffset);
3668 BuildMI(incStackMBB, DL, TII.get(X86::JCC_1))
3669 .addMBB(incStackMBB)
3671
3672 stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
3673 stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
3674 incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
3675 incStackMBB->addSuccessor(incStackMBB, {1, 100});
3676 }
3677#ifdef EXPENSIVE_CHECKS
3679#endif
3680}
3681
3687 return false;
3688
3690 return false;
3691
3693
3694 if (NumPops != 1 && NumPops != 2)
3695 return false;
3696
3697
3698
3700 return false;
3702 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
3703 return false;
3704
3705 unsigned Regs[2];
3706 unsigned FoundRegs = 0;
3707
3709 const MachineOperand &RegMask = Prev->getOperand(1);
3710
3711 auto &RegClass =
3712 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
3713
3714 for (auto Candidate : RegClass) {
3715
3716
3717
3719 continue;
3720
3721
3722 if (MRI.isReserved(Candidate))
3723 continue;
3724
3725 bool IsDef = false;
3726 for (const MachineOperand &MO : Prev->implicit_operands()) {
3727 if (MO.isReg() && MO.isDef() &&
3728 TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
3729 IsDef = true;
3730 break;
3731 }
3732 }
3733
3734 if (IsDef)
3735 continue;
3736
3737 Regs[FoundRegs++] = Candidate;
3738 if (FoundRegs == (unsigned)NumPops)
3739 break;
3740 }
3741
3742 if (FoundRegs == 0)
3743 return false;
3744
3745
3746 while (FoundRegs < (unsigned)NumPops)
3747 Regs[FoundRegs++] = Regs[0];
3748
3749 for (int i = 0; i < NumPops; ++i)
3751 Regs[i]);
3752
3753 return true;
3754}
3755
3760 unsigned Opcode = I->getOpcode();
3761 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
3762 DebugLoc DL = I->getDebugLoc();
3767
3768
3769
3770
3772 return I;
3773
3774 if (!reserveCallFrame) {
3775
3776
3777
3778
3779
3780
3781
3783
3787
3788
3789
3790
3791
3792
3793
3794
3795 bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
3796
3797 if (HasDwarfEHHandlers && !isDestroy &&
3801
3802 if (Amount == 0)
3803 return I;
3804
3805
3806
3807 Amount -= InternalAmt;
3808
3809
3810
3811
3812 if (isDestroy && InternalAmt && DwarfCFI && (MF))
3815
3816
3817 int64_t StackAdjustment = isDestroy ? Amount : -Amount;
3818
3819 if (StackAdjustment) {
3820
3821
3822
3825
3826 if (StackAdjustment) {
3827 if (!(F.hasMinSize() &&
3828 adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
3829 BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
3830 false);
3831 }
3832 }
3833
3835
3836
3837
3838
3839
3840
3841
3842 int64_t CfaAdjustment = -StackAdjustment;
3843
3844
3845 if (CfaAdjustment) {
3849 }
3850 }
3851
3852 return I;
3853 }
3854
3855 if (InternalAmt) {
3858 while (CI != B && !std::prev(CI)->isCall())
3859 --CI;
3860 BuildStackAdjustment(MBB, CI, DL, -InternalAmt, false);
3861 }
3862
3863 return I;
3864}
3865
3870 return true;
3871
3872
3873
3876 if (TLI.hasInlineStackProbe(MF) || TLI.hasStackProbeSymbol(MF))
3877 return false;
3878
3881}
3882
3885
3886
3887
3888
3889
3891 return false;
3892
3893
3894
3898
3900 return true;
3901
3902
3903
3904
3905
3907}
3908
3910
3911
3912 bool CompactUnwind =
3915 !CompactUnwind) &&
3916
3917
3918
3919
3920
3923}
3924
3927 const DebugLoc &DL, bool RestoreSP) const {
3931 "restoring EBP/ESI on non-32-bit target");
3932
3939
3940
3941
3943 int EHRegSize = MFI.getObjectSize(FI);
3944
3945 if (RestoreSP) {
3946
3948 X86::EBP, true, -EHRegSize)
3950 }
3951
3954 int EndOffset = -EHRegOffset - EHRegSize;
3956
3958
3966 assert(EndOffset >= 0 &&
3967 "end of registration object above normal EBP position!");
3968 } else if (UsedReg == BasePtr) {
3969
3973
3978 assert(UsedReg == BasePtr);
3980 UsedReg, true, Offset)
3982 } else {
3983 llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
3984 }
3985 return MBBI;
3986}
3987
3990}
3991
3995}
3996
4004 FrameBase.Kind = DwarfFrameBase::CFA;
4007 return FrameBase;
4008 }
4009
4010 return DwarfFrameBase{DwarfFrameBase::Register, {FrameRegister}};
4011}
4012
4013namespace {
4014
4015struct X86FrameSortingObject {
 // Marks entries that correspond to a real stack object; default-constructed
 // slots in the sorting array stay invalid and are ignored by the comparator.
 4016 bool IsValid = false;
 // Frame index of the stack object this entry describes.
 4017 unsigned ObjectIndex = 0;
 // Size of the stack object in bytes (NOTE(review): zero-sized objects
 // appear to be given a small nominal size by the populating code -- confirm
 // against the caller that fills this array).
 4018 unsigned ObjectSize = 0;
 // Required alignment of the stack object.
 4019 Align ObjectAlignment = Align(1);
 // Number of instruction operands in the function referencing this object;
 // used as the numerator of the "density" (uses / size) metric that the
 // comparator below scales and compares cross-multiplied.
 4020 unsigned ObjectNumUses = 0;
 4021};
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036struct X86FrameSortingComparator {
4037 inline bool operator()(const X86FrameSortingObject &A,
4038 const X86FrameSortingObject &B) const {
4039 uint64_t DensityAScaled, DensityBScaled;
4040
4041
4042
4043
4044 if (.IsValid)
4045 return false;
4046 if (.IsValid)
4047 return true;
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059 DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
4060 static_cast<uint64_t>(B.ObjectSize);
4061 DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
4062 static_cast<uint64_t>(A.ObjectSize);
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072 if (DensityAScaled == DensityBScaled)
4073 return A.ObjectAlignment < B.ObjectAlignment;
4074
4075 return DensityAScaled < DensityBScaled;
4076 }
4077};
4078}
4079
4080
4081
4082
4083
4087
4088
4089 if (ObjectsToAllocate.empty())
4090 return;
4091
4092
4093
4094
4095
4096
4097 std::vector SortingObjects(MFI.getObjectIndexEnd());
4098
4099
4100
4101 for (auto &Obj : ObjectsToAllocate) {
4102 SortingObjects[Obj].IsValid = true;
4103 SortingObjects[Obj].ObjectIndex = Obj;
4104 SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);
4105
4107 if (ObjectSize == 0)
4108
4109 SortingObjects[Obj].ObjectSize = 4;
4110 else
4111 SortingObjects[Obj].ObjectSize = ObjectSize;
4112 }
4113
4114
4115 for (auto &MBB : MF) {
4117 if (MI.isDebugInstr())
4118 continue;
4120
4121 if (!MO.isFI())
4122 continue;
4123 int Index = MO.getIndex();
4124
4125
4127 SortingObjects[Index].IsValid)
4128 SortingObjects[Index].ObjectNumUses++;
4129 }
4130 }
4131 }
4132
4133
4134
4136
4137
4138
4139
4140
4141
4142 int i = 0;
4143 for (auto &Obj : SortingObjects) {
4144
4145 if (!Obj.IsValid)
4146 break;
4147 ObjectsToAllocate[i++] = Obj.ObjectIndex;
4148 }
4149
4150
4151 if (->hasStackRealignment(MF) && hasFP(MF))
4152 std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
4153}
4154
4155unsigned
4157
4158 unsigned Offset = 16;
4159
4161
4163
4164 Offset += getWinEHFuncletFrameSize(MF);
4166}
4167
4170
4171
4173
4174
4175
4178
4179
4180
4184 adjustFrameForMsvcCxxEh(MF);
4185 }
4186}
4187
4188void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {
4189
4190
4191
4192
4193
4196 int64_t MinFixedObjOffset = -SlotSize;
4198 MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
4199
4202 int FrameIndex = H.CatchObj.FrameIndex;
4203 if (FrameIndex != INT_MAX) {
4204
4206 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
4207 MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
4209 }
4210 }
4211 }
4212
4213
4214 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
4215 int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
4216 int UnwindHelpFI =
4219
4220
4221
4226
4229 UnwindHelpFI)
4231}
4232
4236
4239
4240
4242 MI->eraseFromParent();
4244 }
4245}
4246
4249
4250
4251
4256 if (NeedsRestore)
4258 IsSEH);
4259 }
4260}
4261
4262
4263
4266 unsigned NumSpilledRegs) {
4268 unsigned AllocSize = TRI->getSpillSize(*RC) * NumSpilledRegs;
4270 unsigned AlignedSize = alignTo(AllocSize, StackAlign);
4271 return AlignedSize - AllocSize;
4272}
4273
4274void X86FrameLowering::spillFPBPUsingSP(MachineFunction &MF,
4277 int SPAdjust) const {
4279
4281 DebugLoc DL = BeforeMI->getDebugLoc();
4282
4283
4284 if (FP.isValid()) {
4288 }
4289
4290
4295 }
4296
4297
4298 if (SPAdjust)
4300
4301
4302 if (FP.isValid() && needsDwarfCFI(MF)) {
4303
4304 unsigned CFIIndex =
4306 BuildMI(*MBB, BeforeMI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
4308
4309
4310
4313 int Offset = SPAdjust;
4315 Offset += TRI->getSpillSize(*TRI->getMinimalPhysRegClass(BP));
4316
4317
4318 if (TII.isFrameSetup(*BeforeMI)) {
4320 BeforeMI = std::next(BeforeMI);
4321 }
4325 unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackPtr, true);
4326 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfStackPtr));
4328 CfaExpr.push_back(dwarf::DW_OP_deref);
4329 CfaExpr.push_back(dwarf::DW_OP_consts);
4332
4334 DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
4336 DefCfaExpr.append(CfaExpr.str());
4340 }
4341}
4342
4343void X86FrameLowering::restoreFPBPUsingSP(MachineFunction &MF,
4346 int SPAdjust) const {
4348
4349
4352 DebugLoc DL = AfterMI->getDebugLoc();
4353 if (SPAdjust)
4355
4356
4360 }
4361
4362
4363 if (FP.isValid()) {
4366
4367
4368 if (needsDwarfCFI(MF)) {
4369
4370 unsigned CFIIndex =
4372 BuildMI(*MBB, Pos, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
4374 }
4375 }
4376}
4377
4378void X86FrameLowering::saveAndRestoreFPBPUsingSP(
4381 assert(SpillFP || SpillBP);
4382
4385 unsigned NumRegs = 0;
4386
4387 if (SpillFP) {
4391 RC = TRI->getMinimalPhysRegClass(FP);
4392 ++NumRegs;
4393 }
4394 if (SpillBP) {
4398 RC = TRI->getMinimalPhysRegClass(BP);
4399 ++NumRegs;
4400 }
4402
4403 spillFPBPUsingSP(MF, BeforeMI, FP, BP, SPAdjust);
4404 restoreFPBPUsingSP(MF, AfterMI, FP, BP, SPAdjust);
4405}
4406
4407bool X86FrameLowering::skipSpillFPBP(
4409 if (MI->getOpcode() == X86::LCMPXCHG16B_SAVE_RBX) {
4410
4411
4412
4413
4414
4415 int FI;
4416 unsigned Reg;
4417 while (!(MI->getOpcode() == TargetOpcode::COPY &&
4418 MI->getOperand(1).getReg() == X86::RBX) &&
4420 ++MI;
4421 return true;
4422 }
4423 return false;
4424}
4425
4428 bool &AccessBP) {
4429 AccessFP = AccessBP = false;
4430 if (FP) {
4431 if (MI.findRegisterUseOperandIdx(FP, TRI, false) != -1 ||
4432 MI.findRegisterDefOperandIdx(FP, TRI, false, true) != -1)
4433 AccessFP = true;
4434 }
4435 if (BP) {
4436 if (MI.findRegisterUseOperandIdx(BP, TRI, false) != -1 ||
4437 MI.findRegisterDefOperandIdx(BP, TRI, false, true) != -1)
4438 AccessBP = true;
4439 }
4440 return AccessFP || AccessBP;
4441}
4442
4443
4444
4445
4447 if (.isCall())
4448 return false;
4449 if (InsideEHLabels)
4450 return true;
4451
4454 return false;
4455
4456
4459 if (MBBI->isCall())
4460 return false;
4461 return true;
4462}
4463
4464
4465
4466void X86FrameLowering::checkInterferedAccess(
4469 bool SpillBP) const {
4470 if (DefMI == KillMI)
4471 return;
4473 if (!SpillBP)
4474 return;
4475 } else {
4476 if (!SpillFP)
4477 return;
4478 }
4479
4480 auto MI = KillMI;
4483 [](const MachineOperand &MO) { return MO.isFI(); }))
4485 "Interference usage of base pointer/frame "
4486 "pointer.");
4487 MI++;
4488 }
4489}
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4515 if (TFI.hasFP(MF))
4519
4520
4521
4525 FP = 0;
4527 BP = 0;
4528 }
4529 if ( && !BP)
4530 return;
4531
4533 bool InsideEHLabels = false;
4537 continue;
4538 MI = *(std::prev(TermMI));
4539
4540 while (MI != ME) {
4541
4542
4543
4546 isInvoke(*MI, InsideEHLabels) || skipSpillFPBP(MF, MI)) {
4547 ++MI;
4548 continue;
4549 }
4550
4551 if (MI->getOpcode() == TargetOpcode::EH_LABEL) {
4552 InsideEHLabels = !InsideEHLabels;
4553 ++MI;
4554 continue;
4555 }
4556
4557 bool AccessFP, AccessBP;
4558
4560 ++MI;
4561 continue;
4562 }
4563
4564
4565
4566 bool FPLive = false, BPLive = false;
4567 bool SpillFP = false, SpillBP = false;
4569 do {
4570 SpillFP |= AccessFP;
4571 SpillBP |= AccessBP;
4572
4573
4574 if (FPLive && MI->findRegisterDefOperandIdx(FP, TRI, false, true) != -1)
4575 FPLive = false;
4576 if (FP && MI->findRegisterUseOperandIdx(FP, TRI, false) != -1)
4577 FPLive = true;
4578 if (BPLive && MI->findRegisterDefOperandIdx(BP, TRI, false, true) != -1)
4579 BPLive = false;
4580 if (BP && MI->findRegisterUseOperandIdx(BP, TRI, false) != -1)
4581 BPLive = true;
4582
4584 } while ((MI != ME) &&
4585 (FPLive || BPLive ||
4587
4588
4589 if (FPLive && !SpillBP)
4590 continue;
4591
4592
4593
4594 if (KillMI->isCall() && DefMI != ME) {
4595 auto FrameSetup = std::next(DefMI);
4596
4597
4598
4599 while (FrameSetup != ME && .isFrameSetup(*FrameSetup) &&
4600 !FrameSetup->isCall())
4601 ++FrameSetup;
4602
4603
4604 if (FrameSetup != ME && TII.isFrameSetup(*FrameSetup) &&
4605 (TII.getFrameSize(*FrameSetup) ||
4607 while (.isFrameInstr(*KillMI))
4608 --KillMI;
4609 DefMI = FrameSetup;
4611 ++MI;
4612 }
4613 }
4614
4615 checkInterferedAccess(MF, DefMI, KillMI, SpillFP, SpillBP);
4616
4617
4618 saveAndRestoreFPBPUsingSP(MF, &(*DefMI), &(*KillMI), SpillFP, SpillBP);
4619 }
4620 }
4621}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool isFuncletReturnInstr(const MachineInstr &MI)
static const uint64_t kSplitStackAvailable
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Given that RA is a live value
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
static cl::opt< int > PageSize("imp-null-check-page-size", cl::desc("The page size of the target in bytes"), cl::init(4096), cl::Hidden)
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
static bool isTailCallOpcode(unsigned Opc)
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static constexpr Register SPReg
static constexpr Register FPReg
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool is64Bit(const char *name)
static unsigned calculateSetFPREG(uint64_t SPAdjust)
static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary)
GetScratchRegister - Get a temp register for performing work in the segmented stack and the Erlang/Hi...
static unsigned getADDriOpcode(bool IsLP64)
static unsigned getPUSH2Opcode(const X86Subtarget &ST)
static unsigned getMOVriOpcode(bool Use64BitReg, int64_t Imm)
static unsigned getLEArOpcode(bool IsLP64)
static unsigned getSUBriOpcode(bool IsLP64)
static bool flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB)
Check if the flags need to be preserved before the terminators.
static bool isFPBPAccess(const MachineInstr &MI, Register FP, Register BP, const TargetRegisterInfo *TRI, bool &AccessFP, bool &AccessBP)
static bool isOpcodeRep(unsigned Opcode)
Return true if an opcode is part of the REP group of instructions.
static unsigned getANDriOpcode(bool IsLP64, int64_t Imm)
static bool isEAXLiveIn(MachineBasicBlock &MBB)
static int computeFPBPAlignmentGap(MachineFunction &MF, const TargetRegisterClass *RC, unsigned NumSpilledRegs)
static unsigned getADDrrOpcode(bool IsLP64)
static bool HasNestArgument(const MachineFunction *MF)
static unsigned getPOPOpcode(const X86Subtarget &ST)
static bool isInvoke(const MachineInstr &MI, bool InsideEHLabels)
static unsigned getPOP2Opcode(const X86Subtarget &ST)
static unsigned getHiPELiteral(NamedMDNode *HiPELiteralsMD, const StringRef LiteralName)
Lookup an ERTS parameter in the !hipe.literals named metadata node.
static bool blockEndIsUnreachable(const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI)
static unsigned getSUBrrOpcode(bool IsLP64)
static unsigned getPUSHOpcode(const X86Subtarget &ST)
static const unsigned FramePtr
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
reverse_iterator rend() const
bool empty() const
empty - Check if the array is empty.
reverse_iterator rbegin() const
LLVM Basic Block Representation.
iterator_range< const_set_bits_iterator > set_bits() const
static BranchProbability getOne()
static BranchProbability getZero()
The CalleeSavedInfo class tracks the information needed to locate where a callee saved register is in t...
This is the shared class of boolean and integer constants.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool needsUnwindTableEntry() const
True if this function needs an unwind table.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Module * getParent()
Get the module that this global value is contained inside of...
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool usesWindowsCFI() const
static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_def_cfa_register modifies a rule for computing CFA.
static MCCFIInstruction createGnuArgsSize(MCSymbol *L, int64_t Size, SMLoc Loc={})
A special wrapper for .cfi_escape that indicates GNU_ARGS_SIZE.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction createRememberState(MCSymbol *L, SMLoc Loc={})
.cfi_remember_state Save all current rules for all registers.
OpType getOperation() const
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals, SMLoc Loc={}, StringRef Comment="")
.cfi_escape Allows the user to add arbitrary bytes to the unwind info.
static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int64_t Adjustment, SMLoc Loc={})
.cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but Offset is a relative value that is added/subt...
static MCCFIInstruction createRestoreState(MCSymbol *L, SMLoc Loc={})
.cfi_restore_state Restore the previously saved state.
const MCObjectFileInfo * getObjectFileInfo() const
const MCRegisterInfo * getRegisterInfo() const
void reportError(SMLoc L, const Twine &Msg)
MCSection * getCompactUnwindSection() const
MCRegAliasIterator enumerates all registers aliasing Reg.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
StringRef getString() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
bool hasEHPadSuccessor() const
bool isEHPad() const
Returns true if the block is a landing pad.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI, MCRegister Reg, const_iterator Before, unsigned Neighborhood=10) const
Return whether (physical) register Reg has been defined and not killed as of just before Before.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< iterator > terminators()
iterator_range< succ_iterator > successors()
reverse_iterator rbegin()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
@ LQR_Live
Register is known to be (at least partially) live.
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool isCleanupFuncletEntry() const
Returns true if this is the entry block of a cleanup funclet.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool needsSplitStackProlog() const
Return true if this function requires a split stack prolog, even if it uses no stack space.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
void setCVBytesOfCalleeSavedRegisters(unsigned S)
int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasCopyImplyingStackAdjustment() const
Returns true if the function contains operations which will lower down to instructions which manipula...
bool hasStackObjects() const
Return true if there are any stack objects in this function.
int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset, bool IsImmutable=false)
Create a spill slot at a fixed location on the stack.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void setStackSize(uint64_t Size)
Set the size of the stack.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
int getObjectIndexBegin() const
Return the minimum frame object index.
void setOffsetAdjustment(int64_t Adj)
Set the correction for frame offsets.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
unsigned addFrameInst(const MCCFIInstruction &Inst)
void setHasWinCFI(bool v)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const std::vector< MCCFIInstruction > & getFrameInstructions() const
Returns a reference to a list of cfi instructions in the function's prologue.
bool hasInlineAsm() const
Returns true if the function contains any inline assembly.
void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)
Create a substitution between one <instr,operand> value to a different, new value.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool callsUnwindInit() const
void push_front(MachineBasicBlock *MBB)
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
bool callsEHReturn() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const std::vector< LandingPadInfo > & getLandingPads() const
Return a reference to the landing pad info for the current function.
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
bool hasEHFunclets() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getNumOperands() const
Returns the total number of operands.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
bool isLiveIn(Register Reg) const
NamedMDNode * getNamedMetadata(StringRef Name) const
Return the first NamedMDNode in the module with the specified name.
unsigned getCodeViewFlag() const
Returns the CodeView Version by checking module flags.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
MDNode * getOperand(unsigned i) const
unsigned getNumOperands() const
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
Represents a location in source code.
SlotIndex - An opaque wrapper around machine indexes.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
void append(StringRef RHS)
Append from a StringRef.
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
static StackOffset getFixed(int64_t Fixed)
StringRef - Represent a constant reference to a string, i.e.
static constexpr size_t npos
Information about stack frame layout on the target.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
const Triple & getTargetTriple() const
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
SwiftAsyncFramePointerMode SwiftAsyncFramePointer
Control when and how the Swift async frame pointer bit should be set.
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual Register getFrameRegister(const MachineFunction &MF) const =0
Debug information queries.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
bool isOSWindows() const
Tests whether the OS is Windows.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
bool has128ByteRedZone(const MachineFunction &MF) const
Return true if the function has a redzone (accessible bytes past the frame of the top of stack functi...
void spillFPBP(MachineFunction &MF) const override
If a function uses base pointer and the base pointer is clobbered by inline asm, RA doesn't detect th...
bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override
canSimplifyCallFramePseudos - If there is a reserved call frame, the call frame pseudos can be simpli...
bool needsFrameIndexResolution(const MachineFunction &MF) const override
X86FrameLowering(const X86Subtarget &STI, MaybeAlign StackAlignOverride)
const X86RegisterInfo * TRI
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
bool hasFPImpl(const MachineFunction &MF) const override
hasFPImpl - Return true if the specified function should have a dedicated frame pointer register.
MachineBasicBlock::iterator restoreWin32EHStackPointers(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool RestoreSP=false) const
Sets up EBP and optionally ESI based on the incoming EBP value.
int getInitialCFAOffset(const MachineFunction &MF) const override
Return initial CFA offset value i.e.
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
void emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, std::optional< MachineFunction::DebugInstrOperandPair > InstrNum=std::nullopt) const
Emit target stack probe code.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool IsPrologue) const
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
StackOffset getFrameIndexReferenceSP(const MachineFunction &MF, int FI, Register &SPReg, int Adjustment) const
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override
bool enableShrinkWrapping(const MachineFunction &MF) const override
Returns true if the target will correctly handle shrink wrapping.
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - This method should return the base register and offset used to reference a f...
void inlineStackProbe(MachineFunction &MF, MachineBasicBlock &PrologMBB) const override
Replace a StackProbe inline-stub with the actual probe code inline.
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &DL, int64_t NumBytes, bool InEpilogue) const
Emit a series of instructions to increment / decrement the stack pointer by a constant value.
bool canUseAsEpilogue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as an epilogue for the target.
bool Is64Bit
Is64Bit implies that x86_64 instructions are available.
Register getInitialCFARegister(const MachineFunction &MF) const override
Return initial CFA register value i.e.
bool Uses64BitFramePtr
True if the 64-bit frame or stack pointer should be used.
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
void adjustForSegmentedStacks(MachineFunction &MF, MachineBasicBlock &PrologueMBB) const override
Adjust the prologue to have the function use segmented stacks.
DwarfFrameBase getDwarfFrameBase(const MachineFunction &MF) const override
Return the frame base information to be encoded in the DWARF subprogram debug info.
void emitCalleeSavedFrameMovesFullCFA(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const override
Emits Dwarf Info specifying offsets of callee saved registers and frame pointer.
int getWin64EHFrameIndexRef(const MachineFunction &MF, int FI, Register &SPReg) const
bool canUseLEAForSPInEpilogue(const MachineFunction &MF) const
Check that LEA can be used on SP in an epilogue sequence for MF.
bool stackProbeFunctionModifiesSP() const override
Does the stack probe function call return with a modified stack pointer?
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const MCCFIInstruction &CFIInst, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Wraps up getting a CFI index and building a MachineInstr for it.
int mergeSPUpdates(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, bool doMergeWithPrevious) const
Check the instruction before/after the passed instruction.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
Same as getFrameIndexReference, except that the stack pointer (as opposed to the frame pointer) will ...
void restoreWinEHStackPointersInParent(MachineFunction &MF) const
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
void adjustForHiPEPrologue(MachineFunction &MF, MachineBasicBlock &PrologueMBB) const override
Erlang programs may need a special prologue to handle the stack size they might need at runtime.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const override
int64_t getFrameAdjustment(const MachineInstr &I) const
Returns the stack pointer adjustment that happens inside the frame setup..destroy sequence (e....
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
bool getForceFramePointer() const
void setPadForPush2Pop2(bool V)
bool isCandidateForPush2Pop2(Register Reg) const
unsigned getArgumentStackSize() const
bool getFPClobberedByCall() const
int getRestoreBasePointerOffset() const
int getSEHFramePtrSaveIndex() const
bool hasCFIAdjustCfa() const
int getTCReturnAddrDelta() const
void setRestoreBasePointer(const MachineFunction *MF)
bool getHasSEHFramePtrSave() const
DenseMap< int, unsigned > & getWinEHXMMSlotInfo()
bool getBPClobberedByCall() const
void setUsesRedZone(bool V)
bool hasPreallocatedCall() const
bool hasSwiftAsyncContext() const
void setHasSEHFramePtrSave(bool V)
bool getRestoreBasePointer() const
MachineInstr * getStackPtrSaveMI() const
size_t getNumCandidatesForPush2Pop2() const
AMXProgModelEnum getAMXProgModel() const
void addCandidateForPush2Pop2(Register Reg)
unsigned getCalleeSavedFrameSize() const
bool getHasPushSequences() const
bool padForPush2Pop2() const
void setStackPtrSaveMI(MachineInstr *MI)
bool getUsesRedZone() const
void setCalleeSavedFrameSize(unsigned bytes)
void setSEHFramePtrSaveIndex(int Index)
bool hasBasePointer(const MachineFunction &MF) const
Register getFrameRegister(const MachineFunction &MF) const override
unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI) const
findDeadCallerSavedReg - Return a caller-saved register that isn't live when it reaches the "return" ...
Register getStackRegister() const
unsigned getSlotSize() const
Register getFramePtr() const
Returns physical register used as frame pointer.
Register getBaseRegister() const
const X86TargetLowering * getTargetLowering() const override
bool isTargetDragonFly() const
bool isTargetWindowsMSVC() const
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
bool isTargetDarwin() const
bool isTargetWin64() const
bool isTarget64BitLP64() const
Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
bool swiftAsyncContextIsDynamicallySet() const
Return whether FrameLowering should always set the "extended frame present" bit in FP,...
bool isTargetWindowsCoreCLR() const
const X86InstrInfo * getInstrInfo() const override
bool isCallingConvWin64(CallingConv::ID CC) const
bool isTargetFreeBSD() const
bool isTargetNaCl64() const
bool isTargetWin32() const
bool useIndirectThunkCalls() const
bool isTargetLinux() const
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
StringRef getStackProbeSymbolName(const MachineFunction &MF) const override
Returns the name of the symbol used to emit stack probes or the empty string if not applicable.
bool hasStackProbeSymbol(const MachineFunction &MF) const override
Returns true if stack probing through a function call is requested.
unsigned getStackProbeSize(const MachineFunction &MF) const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
uint16_t StackAdjustment(const RuntimeFunction &RF)
StackAdjustment - calculated stack adjustment in words.
@ HiPE
Used by the High-Performance Erlang Compiler (HiPE).
@ X86_INTR
x86 hardware interrupt context.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ X86_FastCall
'fast' analog of X86_StdCall.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Reg
All possible values of the reg field in the ModR/M byte.
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
@ DwarfCFI
DWARF-like instruction based exceptions.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
IterT skipDebugInstructionsForward(IterT It, IterT End, bool SkipPseudoOp=true)
Increment It until it points to a non-debug instruction or to End and return the resulting iterator.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
static const MachineInstrBuilder & addRegOffset(const MachineInstrBuilder &MIB, unsigned Reg, bool isKill, int Offset)
addRegOffset - This function is used to add a memory reference of the form [Reg + Offset],...
@ Always
Always set the bit.
@ Never
Never set the bit.
@ DeploymentBased
Determine whether to set the bit statically or dynamically based on the deployment target.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
IterT skipDebugInstructionsBackward(IterT It, IterT Begin, bool SkipPseudoOp=true)
Decrement It until it points to a non-debug instruction or to Begin and return the resulting iterator...
unsigned getUndefRegState(bool B)
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
unsigned encodeSLEB128(int64_t Value, raw_ostream &OS, unsigned PadTo=0)
Utility function to encode a SLEB128 value to an output stream.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
void computeAndAddLiveIns(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB)
Convenience function combining computeLiveIns() and addLiveIns().
unsigned encodeULEB128(uint64_t Value, raw_ostream &OS, unsigned PadTo=0)
Utility function to encode a ULEB128 value to an output stream.
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-in's for a set of MBBs until the computation converges.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Pair of physical register and lane mask.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
union llvm::TargetFrameLowering::DwarfFrameBase::@248 Location
enum llvm::TargetFrameLowering::DwarfFrameBase::FrameBaseKind Kind
SmallVector< WinEHTryBlockMapEntry, 4 > TryBlockMap
SmallVector< WinEHHandlerType, 1 > HandlerArray