LLVM: lib/Target/X86/X86FrameLowering.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

38#include

39

40#define DEBUG_TYPE "x86-fl"

41

42STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");

44 "Number of extra stack probes generated in prologue");

45STATISTIC(NumFunctionUsingPush2Pop2, "Number of functions using push2/pop2");

46

47using namespace llvm;

48

63

69

70

71

72

73

78 (hasFP(MF) && TRI->hasStackRealignment(MF)) ||

79 TRI->hasBasePointer(MF);

80}

81

82

83

84

85

86

87

88

94

95

96

97

109

111 return IsLP64 ? X86::SUB64ri32 : X86::SUB32ri;

112}

113

115 return IsLP64 ? X86::ADD64ri32 : X86::ADD32ri;

116}

117

119 return IsLP64 ? X86::SUB64rr : X86::SUB32rr;

120}

121

123 return IsLP64 ? X86::ADD64rr : X86::ADD32rr;

124}

125

127 return IsLP64 ? X86::AND64ri32 : X86::AND32ri;

128}

129

131 return IsLP64 ? X86::LEA64r : X86::LEA32r;

132}

133

135 if (Use64BitReg) {

137 return X86::MOV32ri64;

139 return X86::MOV64ri32;

140 return X86::MOV64ri;

141 }

142 return X86::MOV32ri;

143}

144

145

146

147

148

149

150

151

152

153

154

155

156

157

158

159

160

161

163 return ST.is64Bit() ? (ST.hasPPX() ? X86::PUSHP64r : X86::PUSH64r)

164 : X86::PUSH32r;

165}

167 return ST.is64Bit() ? (ST.hasPPX() ? X86::POPP64r : X86::POP64r)

168 : X86::POP32r;

169}

171 return ST.hasPPX() ? X86::PUSH2P : X86::PUSH2;

172}

174 return ST.hasPPX() ? X86::POP2P : X86::POP2;

175}

176

180

181 if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||

182 Reg == X86::AH || Reg == X86::AL)

183 return true;

184 }

185

186 return false;

187}

188

189

190

191

192

193static bool

196 bool BreakNext = false;

198 if (!MO.isReg())

199 continue;

201 if (Reg != X86::EFLAGS)

202 continue;

203

204

205

206

207 if (!MO.isDef())

208 return true;

209

210

211

212 BreakNext = true;

213 }

214

215 if (BreakNext)

216 return false;

217 }

218

219

220

222 if (Succ->isLiveIn(X86::EFLAGS))

223 return true;

224

225 return false;

226}

227

229

230

231

234 const DebugLoc &DL, int64_t NumBytes,

235 bool InEpilogue) const {

236 bool isSub = NumBytes < 0;

240

242

243

244

246 return;

247 }

248

252 const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

253

254

255

256 if (EmitInlineStackProbe && !InEpilogue) {

257

258

259

261 return;

263

264

265 unsigned Reg = 0;

267

269 Reg = Rax;

270 else

273

276 if (Reg) {

278 Reg)

284 MI->getOperand(3).setIsDead();

285 return;

287

288

289

290

291

292

293

298

299

300 if (isSub)

302 else

305 Rax)

311 MI->getOperand(3).setIsDead();

312

316

319 return;

320 }

321 }

322

325

326

327 unsigned Reg = isSub ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)

328 : TRI->findDeadCallerSavedReg(MBB, MBBI);

329 if (Reg) {

330 unsigned Opc = isSub ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)

331 : (Is64Bit ? X86::POP64r : X86::POP32r);

335 return;

336 }

337 }

338

340

341 BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)

342 .setMIFlag(Flag);

343

345 }

346}

347

351 assert(Offset != 0 && "zero offset stack adjustment requested");

352

353

354

355 bool UseLEA;

356 if (!InEpilogue) {

357

358

359

360

361 UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);

362 } else {

363

364

365

366

367

369 if (UseLEA && STI.useLeaForSP())

371

372

374 "We shouldn't have allowed this insertion point");

375 }

376

377 MachineInstrBuilder MI;

378 if (UseLEA) {

383 } else {

384 bool IsSub = Offset < 0;

385 uint64_t AbsOffset = IsSub ? -Offset : Offset;

391 MI->getOperand(3).setIsDead();

392 }

393 return MI;

394}

395

396template <typename FoundT, typename CalcT>

399 FoundT FoundStackAdjust,

400 CalcT CalcNewOffset,

401 bool doMergeWithPrevious) const {

402 if ((doMergeWithPrevious && MBBI == MBB.begin()) ||

403 (!doMergeWithPrevious && MBBI == MBB.end()))

404 return CalcNewOffset(0);

405

407

409

410

411

412

413

414

415

416

417

418

419

420 if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())

421 PI = std::prev(PI);

422

424 for (;;) {

425 unsigned Opc = PI->getOpcode();

426

427 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD32ri) &&

428 PI->getOperand(0).getReg() == StackPtr) {

430 Offset = PI->getOperand(2).getImm();

431 } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&

432 PI->getOperand(0).getReg() == StackPtr &&

433 PI->getOperand(1).getReg() == StackPtr &&

434 PI->getOperand(2).getImm() == 1 &&

435 PI->getOperand(3).getReg() == X86::NoRegister &&

436 PI->getOperand(5).getReg() == X86::NoRegister) {

437

438 Offset = PI->getOperand(4).getImm();

439 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB32ri) &&

440 PI->getOperand(0).getReg() == StackPtr) {

442 Offset = -PI->getOperand(2).getImm();

443 } else

444 return CalcNewOffset(0);

445

446 FoundStackAdjust(PI, Offset);

447 if ((uint64_t)std::abs((int64_t)CalcNewOffset(Offset)) < MaxSPChunk)

448 break;

449

450 if (doMergeWithPrevious ? (PI == MBB.begin()) : (PI == MBB.end()))

451 return CalcNewOffset(0);

452

453 PI = doMergeWithPrevious ? std::prev(PI) : std::next(PI);

454 }

455

457 if (PI != MBB.end() && PI->isCFIInstruction()) {

459 MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()];

463 }

464 if (!doMergeWithPrevious)

466

467 return CalcNewOffset(Offset);

468}

469

472 int64_t AddOffset,

473 bool doMergeWithPrevious) const {

474 return mergeSPUpdates(

476 doMergeWithPrevious);

477}

478

485 unsigned CFIIndex = MF.addFrameInst(CFIInst);

486

489

493}

494

495

496

500 if (hasFP(MF)) {

502 return;

503 }

506 const Register MachineFramePtr =

509 unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);

510

515}

516

519 const DebugLoc &DL, bool IsPrologue) const {

524

525

527

528

532 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);

533

534 if (IsPrologue) {

536

537

538

539

540

543 CfaExpr.push_back(dwarf::DW_CFA_expression);

548 const Register MachineFramePtr =

549 STI.isTarget64BitILP32()

552 unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);

553 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfFramePtr));

558 } else {

561 }

562 } else {

565 }

566 }

568 int FI = MI->getOperand(1).getIndex();

572 const Register MachineFramePtr =

573 STI.isTarget64BitILP32()

576 unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);

577 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfFramePtr));

580 CfaExpr.push_back(dwarf::DW_OP_deref);

581

583 DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);

585 DefCfaExpr.append(CfaExpr.str());

586

590 }

591}

592

593void X86FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,

596

597

599

600

603 DL = MBBI->getDebugLoc();

604

605

606

609 if (!X86::RFP80RegClass.contains(Reg))

610 continue;

611

612 unsigned NumFPRegs = ST.is64Bit() ? 8 : 7;

613 for (unsigned i = 0; i != NumFPRegs; ++i)

615

616 for (unsigned i = 0; i != NumFPRegs; ++i)

618 break;

619 }

620

621

624 if (TRI->isGeneralPurposeRegister(MF, Reg)) {

627 }

628

629

630 for (MCRegister Reg : GPRsToZero.set_bits())

632

633

634 for (MCRegister Reg : RegsToZero.set_bits())

636}

637

641 std::optionalMachineFunction::DebugInstrOperandPair InstrNum) const {

643 if (STI.isTargetWindowsCoreCLR()) {

644 if (InProlog) {

646 .addImm(0 );

647 } else {

648 emitStackProbeInline(MF, MBB, MBBI, DL, false);

649 }

650 } else {

651 emitStackProbeCall(MF, MBB, MBBI, DL, InProlog, InstrNum);

652 }

653}

654

656 return STI.isOSWindows() && STI.isTargetWin64();

657}

658

662 return MI.getOpcode() == X86::STACKALLOC_W_PROBING;

663 });

664 if (Where != PrologMBB.end()) {

666 emitStackProbeInline(MF, PrologMBB, Where, DL, true);

667 Where->eraseFromParent();

668 }

669}

670

671void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,

675 bool InProlog) const {

678 emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);

679 else

680 emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);

681}

682

683void X86FrameLowering::emitStackProbeInlineGeneric(

688

692 "different expansion expected for CoreCLR 64 bit");

693

694 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

695 uint64_t ProbeChunk = StackProbeSize * 8;

696

698 TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;

699

700

701

702

703 if (Offset > ProbeChunk) {

704 emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,

705 MaxAlign % StackProbeSize);

706 } else {

707 emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,

708 MaxAlign % StackProbeSize);

709 }

710}

711

712void X86FrameLowering::emitStackProbeInlineGenericBlock(

715 uint64_t AlignOffset) const {

716

717 const bool NeedsDwarfCFI = needsDwarfCFI(MF);

719 const X86Subtarget &STI = MF.getSubtarget();

720 const X86TargetLowering &TLI = *STI.getTargetLowering();

721 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;

722 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

723

724 uint64_t CurrentOffset = 0;

725

726 assert(AlignOffset < StackProbeSize);

727

728

729 if (StackProbeSize < Offset + AlignOffset) {

730

732 BuildStackAdjustment(MBB, MBBI, DL, -StackAdjustment, false)

734 if (!HasFP && NeedsDwarfCFI) {

738 }

739

745 NumFrameExtraProbe++;

746 CurrentOffset = StackProbeSize - AlignOffset;

747 }

748

749

750

751

752 while (CurrentOffset + StackProbeSize < Offset) {

753 BuildStackAdjustment(MBB, MBBI, DL, -StackProbeSize, false)

755

756 if (!HasFP && NeedsDwarfCFI) {

760 }

766 NumFrameExtraProbe++;

767 CurrentOffset += StackProbeSize;

768 }

769

770

771 uint64_t ChunkSize = Offset - CurrentOffset;

773

774

775 unsigned Reg = Is64Bit ? X86::RAX : X86::EAX;

776 unsigned Opc = Is64Bit ? X86::PUSH64r : X86::PUSH32r;

780 } else {

781 BuildStackAdjustment(MBB, MBBI, DL, -ChunkSize, false)

783 }

784

785

786}

787

788void X86FrameLowering::emitStackProbeInlineGenericLoop(

791 uint64_t AlignOffset) const {

793

796 "Inline stack probe loop will clobber live EFLAGS.");

797

798 const bool NeedsDwarfCFI = needsDwarfCFI(MF);

800 const X86Subtarget &STI = MF.getSubtarget();

801 const X86TargetLowering &TLI = *STI.getTargetLowering();

802 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;

803 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

804

805 if (AlignOffset) {

806 if (AlignOffset < StackProbeSize) {

807

808 BuildStackAdjustment(MBB, MBBI, DL, -AlignOffset, false)

810

816 NumFrameExtraProbe++;

817 Offset -= AlignOffset;

818 }

819 }

820

821

822 NumFrameLoopProbe++;

824

827

829 MF.insert(MBBIter, testMBB);

830 MF.insert(MBBIter, tailMBB);

831

834 : X86::EAX;

835

836

837 {

838 const uint64_t BoundOffset = alignDown(Offset, StackProbeSize);

839

840

841

842

843 bool canUseSub =

845

846 if (canUseSub) {

848

853 .addReg(FinalStackProbed)

858 .addImm(-BoundOffset)

861 .addReg(FinalStackProbed)

864 } else {

865 llvm_unreachable("Offset too large for 32-bit stack pointer");

866 }

867

868

869

870 if (!HasFP && NeedsDwarfCFI) {

871

872

873 const Register DwarfFinalStackProbed =

874 STI.isTarget64BitILP32()

876 : FinalStackProbed;

877

880 nullptr, TRI->getDwarfRegNum(DwarfFinalStackProbed, true)));

883 }

884 }

885

886

887 BuildStackAdjustment(*testMBB, testMBB->end(), DL, -StackProbeSize,

888 false)

890

891

897

898

901 .addReg(FinalStackProbed)

903

904

911

912

916

917

918 const uint64_t TailOffset = Offset % StackProbeSize;

920 if (TailOffset) {

921 BuildStackAdjustment(*tailMBB, TailMBBIter, DL, -TailOffset,

922 false)

924 }

925

926

927 if (!HasFP && NeedsDwarfCFI) {

928

929

930 const Register DwarfStackPtr =

931 STI.isTarget64BitILP32()

934

937 nullptr, TRI->getDwarfRegNum(DwarfStackPtr, true)));

938 }

939

940

942}

943

944void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(

947 const X86Subtarget &STI = MF.getSubtarget();

948 assert(STI.is64Bit() && "different expansion needed for 32 bit");

949 assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");

950 const TargetInstrInfo &TII = *STI.getInstrInfo();

952

955 "Inline stack probe loop will clobber live EFLAGS.");

956

957

958

959

960

961

962

963

964

965

966

967

968

969

970

971

972

973

974

975

976

977

978

979

980

981

982

983

984

988

990 MF.insert(MBBIter, RoundMBB);

991 MF.insert(MBBIter, LoopMBB);

992 MF.insert(MBBIter, ContinueMBB);

993

994

998

999

1000 const int64_t ThreadEnvironmentStackLimit = 0x10;

1001 const int64_t PageSize = 0x1000;

1002 const int64_t PageMask = ~(PageSize - 1);

1003

1004

1005

1007 const TargetRegisterClass *RegClass = &X86::GR64RegClass;

1009 SizeReg = InProlog ? X86::RAX : MRI.createVirtualRegister(RegClass),

1010 ZeroReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),

1011 CopyReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),

1012 TestReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),

1013 FinalReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),

1014 RoundedReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),

1015 LimitReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),

1016 JoinReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),

1017 ProbeReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass);

1018

1019

1020 int64_t RCXShadowSlot = 0;

1021 int64_t RDXShadowSlot = 0;

1022

1023

1024 if (InProlog) {

1025

1026

1027

1028 X86MachineFunctionInfo *X86FI = MF.getInfo();

1031

1032

1033

1034

1035 const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);

1036 const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);

1037 int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);

1038

1039

1040 if (IsRCXLiveIn)

1041 RCXShadowSlot = InitSlot;

1042 if (IsRDXLiveIn)

1043 RDXShadowSlot = InitSlot;

1044 if (IsRDXLiveIn && IsRCXLiveIn)

1045 RDXShadowSlot += 8;

1046

1047 if (IsRCXLiveIn)

1049 RCXShadowSlot)

1051 if (IsRDXLiveIn)

1053 RDXShadowSlot)

1055 } else {

1056

1058 }

1059

1060

1061

1073

1074

1075

1076

1077

1078

1079

1080

1085 .addImm(ThreadEnvironmentStackLimit)

1088

1090 .addMBB(ContinueMBB)

1092

1093

1094 if (InProlog)

1096 BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)

1100

1101

1102

1103

1104 if (!InProlog) {

1105 BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)

1110 }

1111

1112 if (InProlog)

1116

1117

1125

1126 if (InProlog)

1134

1136

1137

1138 if (InProlog) {

1139 if (RCXShadowSlot)

1141 TII.get(X86::MOV64rm), X86::RCX),

1142 X86::RSP, false, RCXShadowSlot);

1143 if (RDXShadowSlot)

1145 TII.get(X86::MOV64rm), X86::RDX),

1146 X86::RSP, false, RDXShadowSlot);

1147 }

1148

1149

1150

1151 BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)

1154

1155

1161

1162 if (InProlog) {

1163 LivePhysRegs LiveRegs;

1165 }

1166

1167

1168 if (InProlog) {

1169 for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {

1171 }

1172 for (MachineInstr &MI : *RoundMBB) {

1174 }

1175 for (MachineInstr &MI : *LoopMBB) {

1177 }

1178 for (MachineInstr &MI :

1181 }

1182 }

1183}

1184

1185void X86FrameLowering::emitStackProbeCall(

1188 std::optionalMachineFunction::DebugInstrOperandPair InstrNum) const {

1190

1191

1192 if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())

1193 report_fatal_error("Emitting stack probe calls on 64-bit with the large "

1194 "code model and indirect thunks not yet implemented.");

1195

1198 "Stack probe calls will clobber live EFLAGS.");

1199

1200 unsigned CallOp;

1202 CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;

1203 else

1204 CallOp = X86::CALLpcrel32;

1205

1206 StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);

1207

1208 MachineInstrBuilder CI;

1210

1211

1212

1214

1215

1219 } else {

1222 }

1223

1231

1232 MachineInstr *ModInst = CI;

1233 if (STI.isTargetWin64() || STI.isOSWindows()) {

1234

1235

1236

1237

1238

1239

1240 ModInst =

1244 }

1245

1246

1247

1248

1249 if (InstrNum) {

1250 if (STI.isTargetWin64() || STI.isOSWindows()) {

1251

1254 } else {

1255

1256

1257 unsigned SPDefOperand = ModInst->getNumOperands() - 2;

1260 }

1261 }

1262

1263 if (InProlog) {

1264

1265 for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)

1267 }

1268}

1269

1271

1272

1273 const uint64_t Win64MaxSEHOffset = 128;

1274 uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);

1275

1276 return SEHFrameOffset & -16;

1277}

1278

1279

1280

1281

1282

1283uint64_t

1284X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {

1285 const MachineFrameInfo &MFI = MF.getFrameInfo();

1286 Align MaxAlign = MFI.getMaxAlign();

1289 if (HasRealign) {

1291 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;

1292 else if (MaxAlign < SlotSize)

1294 }

1295

1297 if (HasRealign)

1298 MaxAlign = (MaxAlign > 16) ? MaxAlign : Align(16);

1299 else

1300 MaxAlign = Align(16);

1301 }

1302 return MaxAlign.value();

1303}

1304

1308 uint64_t MaxAlign) const {

1309 uint64_t Val = -MaxAlign;

1311

1313 const X86Subtarget &STI = MF.getSubtarget();

1314 const X86TargetLowering &TLI = *STI.getTargetLowering();

1315 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

1316 const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

1317

1318

1319

1320

1321 if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {

1322 {

1323 NumFrameLoopProbe++;

1324 MachineBasicBlock *entryMBB =

1326 MachineBasicBlock *headMBB =

1328 MachineBasicBlock *bodyMBB =

1330 MachineBasicBlock *footMBB =

1332

1334 MF.insert(MBBIter, entryMBB);

1335 MF.insert(MBBIter, headMBB);

1336 MF.insert(MBBIter, bodyMBB);

1337 MF.insert(MBBIter, footMBB);

1338 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;

1341 : X86::EAX;

1342

1343

1344 {

1345

1347 BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)

1350 MachineInstr *MI =

1351 BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)

1352 .addReg(FinalStackProbed)

1355

1356

1357 MI->getOperand(3).setIsDead();

1358

1361 .addReg(FinalStackProbed)

1370 }

1371

1372

1373

1374 {

1378 .addImm(StackProbeSize)

1380

1384 .addReg(FinalStackProbed)

1386

1387

1392

1395 }

1396

1397

1398 {

1404

1408 .addImm(StackProbeSize)

1410

1411

1414 .addReg(FinalStackProbed)

1417

1418

1425 }

1426

1427

1428 {

1430 .addReg(FinalStackProbed)

1438 }

1439

1441 }

1442 } else {

1447

1448

1449 MI->getOperand(3).setIsDead();

1450 }

1451}

1452

1454

1455

1457 "MF used frame lowering for wrong subtarget");

1459 const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());

1461}

1462

1463

1464

1465

1466bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {

1468}

1469

1470bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {

1472}

1473

1474

1476 switch (Opcode) {

1477 case X86::REPNE_PREFIX:

1478 case X86::REP_MOVSB_32:

1479 case X86::REP_MOVSB_64:

1480 case X86::REP_MOVSD_32:

1481 case X86::REP_MOVSD_64:

1482 case X86::REP_MOVSQ_32:

1483 case X86::REP_MOVSQ_64:

1484 case X86::REP_MOVSW_32:

1485 case X86::REP_MOVSW_64:

1486 case X86::REP_PREFIX:

1487 case X86::REP_STOSB_32:

1488 case X86::REP_STOSB_64:

1489 case X86::REP_STOSD_32:

1490 case X86::REP_STOSD_64:

1491 case X86::REP_STOSQ_32:

1492 case X86::REP_STOSQ_64:

1493 case X86::REP_STOSW_32:

1494 case X86::REP_STOSW_64:

1495 return true;

1496 default:

1497 break;

1498 }

1499 return false;

1500}

1501

1502

1503

1504

1505

1506

1507

1508

1509

1510

1511

1512

1513

1514

1515

1516

1517

1518

1519

1520

1521

1522

1523

1524

1525

1526

1527

1528

1529

1530

1531

1532

1533

1534

1535

1536

1537

1538

1539

1540

1541

1542

1543

1544

1545

1546

1547

1548

1549

1550

1551

1552

1553

1554

1555

1556

1557

1558

1559

1560

1561

1562

1563

1564

1565

1566

1567

1568

1569

1570

1571

1572

1573

1574

1575

1576

1577

1578

1579

1580

1581

1582

1583

1584

1585

1586

1590 "MF used frame lowering for wrong subtarget");

1595 uint64_t MaxAlign = calculateMaxStackAlign(MF);

1597 bool IsFunclet = MBB.isEHFuncletEntry();

1601 bool FnHasClrFunclet =

1603 bool IsClrFunclet = IsFunclet && FnHasClrFunclet;

1604 bool HasFP = hasFP(MF);

1605 bool IsWin64Prologue = isWin64Prologue(MF);

1607

1608 bool NeedsWinFPO = !IsFunclet && STI.isTargetWin32() &&

1610 bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;

1611 bool NeedsDwarfCFI = needsDwarfCFI(MF);

1613 const Register MachineFramePtr =

1616 Register BasePtr = TRI->getBaseRegister();

1617 bool HasWinCFI = false;

1618

1619

1620

1623

1624

1626

1627

1628 ArgBaseReg = MI->getOperand(0).getReg();

1629

1630

1631

1632

1634 ArgBaseReg)

1637 .addUse(X86::NoRegister)

1639 .addUse(X86::NoRegister)

1641 if (NeedsDwarfCFI) {

1642

1643 unsigned DwarfStackPtr = TRI->getDwarfRegNum(ArgBaseReg, true);

1647 }

1653 .addReg(X86::NoRegister)

1655 .addReg(X86::NoRegister)

1657 }

1658

1659

1660

1662 if (TailCallArgReserveSize && IsWin64Prologue)

1663 report_fatal_error("Can't handle guaranteed tail call under win64 yet");

1664

1665 const bool EmitStackProbeCall =

1666 STI.getTargetLowering()->hasStackProbeSymbol(MF);

1667 unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF);

1668

1672 if (STI.swiftAsyncContextIsDynamicallySet()) {

1673

1674

1676 .addUse(MachineFramePtr)

1679 .addUse(X86::NoRegister)

1682 .addUse(X86::NoRegister);

1683 break;

1684 }

1685 [[fallthrough]];

1686

1689 !IsWin64Prologue &&

1690 "win64 prologue does not set the bit 60 in the saved frame pointer");

1692 .addUse(MachineFramePtr)

1695 break;

1696

1698 break;

1699 }

1700 }

1701

1702

1703

1704

1707 StackSize += 8;

1709

1710

1711

1712

1713

1714

1715

1716

1717

1718

1722 }

1723

1724

1725

1726

1727

1728

1732 !EmitStackProbeCall &&

1737 if (HasFP)

1739 X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);

1740 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);

1742 }

1743

1744

1745

1746

1747 if (TailCallArgReserveSize != 0) {

1748 BuildStackAdjustment(MBB, MBBI, DL, -(int)TailCallArgReserveSize,

1749 false)

1751 }

1752

1753

1754

1755

1756

1757

1758

1759

1760

1761

1762

1763

1764

1765

1766

1768 int stackGrowth = -SlotSize;

1769

1770

1772 if (IsClrFunclet)

1774 else if (IsFunclet)

1776

1777 if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {

1778

1779

1780

1781 unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;

1783 .addReg(Establisher)

1785 MBB.addLiveIn(Establisher);

1786 }

1787

1788 if (HasFP) {

1790

1791

1793 NumBytes =

1795

1796

1797 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)

1798 NumBytes = alignTo(NumBytes, MaxAlign);

1799

1800

1805

1806 if (NeedsDwarfCFI && !ArgBaseReg.isValid()) {

1807

1808

1812 nullptr, -2 * stackGrowth + (int)TailCallArgReserveSize),

1814

1815

1816 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);

1819 2 * stackGrowth -

1820 (int)TailCallArgReserveSize),

1822 }

1823

1824 if (NeedsWinCFI) {

1825 HasWinCFI = true;

1829 }

1830

1831 if (!IsFunclet) {

1833 assert(!IsWin64Prologue &&

1834 "win64 prologue does not store async context right below rbp");

1836

1837

1838

1839

1840 if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {

1841

1842

1843 MBB.addLiveIn(X86::R14);

1847 } else {

1848

1849

1853 }

1854

1855 if (NeedsWinCFI) {

1856 HasWinCFI = true;

1860 }

1861

1865 .addUse(X86::NoRegister)

1867 .addUse(X86::NoRegister)

1873 }

1874

1875 if (!IsWin64Prologue && !IsFunclet) {

1876

1883

1884 if (NeedsDwarfCFI) {

1885 if (ArgBaseReg.isValid()) {

1887 CfaExpr.push_back(dwarf::DW_CFA_expression);

1889 unsigned DwarfReg = TRI->getDwarfRegNum(MachineFramePtr, true);

1892 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));

1894

1898 } else {

1899

1900

1901 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);

1906 }

1907 }

1908

1909 if (NeedsWinFPO) {

1910

1911 HasWinCFI = true;

1916 }

1917 }

1918 }

1919 } else {

1920 assert(!IsFunclet && "funclets without FPs not yet implemented");

1921 NumBytes =

1923 }

1924

1925

1926

1927 if (!IsFunclet) {

1928 if (HasFP && TRI->hasStackRealignment(MF))

1930 else

1932 }

1933

1934

1935

1936 unsigned ParentFrameNumBytes = NumBytes;

1937 if (IsFunclet)

1938 NumBytes = getWinEHFuncletFrameSize(MF);

1939

1940

1941 bool PushedRegs = false;

1946 return false;

1947 unsigned Opc = MBBI->getOpcode();

1948 return Opc == X86::PUSH32r || Opc == X86::PUSH64r || Opc == X86::PUSHP64r ||

1949 Opc == X86::PUSH2 || Opc == X86::PUSH2P;

1950 };

1951

1952 while (IsCSPush(MBBI)) {

1953 PushedRegs = true;

1954 Register Reg = MBBI->getOperand(0).getReg();

1955 LastCSPush = MBBI;

1957 unsigned Opc = LastCSPush->getOpcode();

1958

1959 if (!HasFP && NeedsDwarfCFI) {

1960

1961

1963

1964

1965 if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)

1971 }

1972

1973 if (NeedsWinCFI) {

1974 HasWinCFI = true;

1978 if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)

1980 .addImm(LastCSPush->getOperand(1).getReg())

1982 }

1983 }

1984

1985

1986

1987

1988 if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF) &&

1989 !ArgBaseReg.isValid()) {

1990 assert(HasFP && "There should be a frame pointer if stack is realigned.");

1992

1993 if (NeedsWinCFI) {

1994 HasWinCFI = true;

1998 }

1999 }

2000

2001

2002

2003

2004 NumBytes = mergeSPUpdates(

2006 true);

2007

2008

2009

2010

2011

2012

2013

2014

2015

2016

2017

2018 uint64_t AlignedNumBytes = NumBytes;

2019 if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))

2020 AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);

2021 if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {

2023 "The Red Zone is not accounted for in stack probes");

2024

2025

2027

2028 if (isEAXAlive) {

2030

2034 } else {

2035

2039 }

2040 }

2041

2043

2044

2045 int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;

2049 } else {

2050

2051

2053 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)

2055 }

2056

2057

2059

2060 if (isEAXAlive) {

2061

2065 StackPtr, false, NumBytes - 8);

2066 else

2068 StackPtr, false, NumBytes - 4);

2071 }

2072 } else if (NumBytes) {

2074 }

2075

2076 if (NeedsWinCFI && NumBytes) {

2077 HasWinCFI = true;

2081 }

2082

2083 int SEHFrameOffset = 0;

2085 if (IsFunclet) {

2086 if (IsClrFunclet) {

2087

2088

2089

2090

2091 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);

2093 MBB.addLiveIn(Establisher);

2095 Establisher, false, PSPSlotOffset)

2098 ;

2099

2100

2102 false, PSPSlotOffset)

2103 .addReg(Establisher)

2105 NoInfo,

2108 }

2109 SPOrEstablisher = Establisher;

2110 } else {

2112 }

2113

2114 if (IsWin64Prologue && HasFP) {

2115

2116

2117

2119 if (SEHFrameOffset)

2121 SPOrEstablisher, false, SEHFrameOffset);

2122 else

2124 .addReg(SPOrEstablisher);

2125

2126

2127 if (NeedsWinCFI && !IsFunclet) {

2128 assert(!NeedsWinFPO && "this setframe incompatible with FPO data");

2129 HasWinCFI = true;

2132 .addImm(SEHFrameOffset)

2136 }

2137 } else if (IsFunclet && STI.is32Bit()) {

2138

2140

2141

2142 if (MBB.isCleanupFuncletEntry()) {

2147

2149 false, EHRegOffset)

2151 }

2152 }

2153

2157

2158 if (NeedsWinCFI) {

2159 int FI;

2160 if (Register Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {

2161 if (X86::FR64RegClass.contains(Reg)) {

2164 if (IsWin64Prologue && IsFunclet)

2166 else

2169 SEHFrameOffset;

2170

2171 HasWinCFI = true;

2172 assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");

2177 }

2178 }

2179 }

2180 }

2181

2182 if (NeedsWinCFI && HasWinCFI)

2185

2186 if (FnHasClrFunclet && !IsFunclet) {

2187

2188

2189

2190 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);

2194 PSPSlotOffset)

2199 }

2200

2201

2202

2203

2204 if (IsWin64Prologue && TRI->hasStackRealignment(MF)) {

2205 assert(HasFP && "There should be a frame pointer if stack is realigned.");

2206 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);

2207 }

2208

2209

2210 if (IsFunclet && STI.is32Bit())

2211 return;

2212

2213

2214

2215

2216

2217 if (TRI->hasBasePointer(MF)) {

2218

2221 .addReg(SPOrEstablisher)

2224

2225

2229 .addReg(SPOrEstablisher)

2231 }

2232

2234

2235

2236

2237

2243 assert(UsedReg == BasePtr);

2247 }

2248 }

2249 if (ArgBaseReg.isValid()) {

2250

2252 int FI = MI->getOperand(1).getIndex();

2253 unsigned MOVmr = Is64Bit ? X86::MOV64mr : X86::MOV32mr;

2254

2258 }

2259

2260 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {

2261

2262 if (!HasFP && NumBytes) {

2263

2269 }

2270

2271

2273 }

2274

2275

2276

2277

2278

2279

2280

2281

2282

2283

2284

2285

2286

2288 bool NeedsCLD = false;

2289

2292 if (MI.isCall()) {

2293 NeedsCLD = true;

2294 break;

2295 }

2296

2298 NeedsCLD = true;

2299 break;

2300 }

2301

2302 if (MI.isInlineAsm()) {

2303

2304

2305

2306 NeedsCLD = true;

2307 break;

2308 }

2309 }

2310 }

2311

2312 if (NeedsCLD) {

2315 }

2316 }

2317

2318

2320}

2321

2324

2325

2326

2327

2328

2329

2331}

2332

2334 switch (MI.getOpcode()) {

2335 case X86::CATCHRET:

2336 case X86::CLEANUPRET:

2337 return true;

2338 default:

2339 return false;

2340 }

2342}

2343

2344

2345

2346

2347

2348

2349

2350

2351

2352

2353

2354

2355

2356

2357unsigned

2358X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {

2362 true)

2365 return static_cast<unsigned>(Offset);

2366}

2367

2368unsigned

2369X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {

2370 const X86MachineFunctionInfo *X86FI = MF.getInfo();

2371

2373

2375 unsigned XMMSize =

2376 WinEHXMMSlotInfo.size() * TRI->getSpillSize(X86::VR128RegClass);

2377

2378 unsigned UsedSize;

2382

2383

2384

2385 UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;

2386 } else {

2387

2389 }

2390

2391

2392

2394

2395

2396 return FrameSizeMinusRBP + XMMSize - CSSize;

2397}

2398

2400 return Opc == X86::TCRETURNri || Opc == X86::TCRETURN_WIN64ri ||

2401 Opc == X86::TCRETURN_HIPE32ri || Opc == X86::TCRETURNdi ||

2402 Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||

2403 Opc == X86::TCRETURNri64_ImpCall || Opc == X86::TCRETURNdi64 ||

2404 Opc == X86::TCRETURNmi64 || Opc == X86::TCRETURN_WINmi64;

2405}

2406

2415 DL = MBBI->getDebugLoc();

2416

2417 const bool Is64BitILP32 = STI.isTarget64BitILP32();

2421

2423 bool NeedsWin64CFI =

2426

2427

2429 uint64_t MaxAlign = calculateMaxStackAlign(MF);

2432 bool HasFP = hasFP(MF);

2434

2435 bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() &&

2439

2442 unsigned Opc = X86::LEA32r;

2443 Register StackReg = X86::ESP;

2444 ArgBaseReg = MI->getOperand(0).getReg();

2445 if (STI.is64Bit()) {

2446 Opc = X86::LEA64r;

2447 StackReg = X86::RSP;

2448 }

2449

2450

2454 .addUse(X86::NoRegister)

2456 .addUse(X86::NoRegister)

2458 if (NeedsDwarfCFI) {

2459 unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackReg, true);

2464 }

2466 }

2467

2468 if (IsFunclet) {

2469 assert(HasFP && "EH funclets without FP not yet implemented");

2470 NumBytes = getWinEHFuncletFrameSize(MF);

2471 } else if (HasFP) {

2472

2474 NumBytes = FrameSize - CSSize - TailCallArgReserveSize;

2475

2476

2477

2478 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)

2479 NumBytes = alignTo(FrameSize, MaxAlign);

2480 } else {

2481 NumBytes = StackSize - CSSize - TailCallArgReserveSize;

2482 }

2483 uint64_t SEHStackAllocAmt = NumBytes;

2484

2485

2487 if (HasFP) {

2489

2492 }

2493

2496 MachineFramePtr)

2498

2499

2500

2503 .addUse(MachineFramePtr)

2506 }

2507

2508 if (NeedsDwarfCFI) {

2509 if (!ArgBaseReg.isValid()) {

2510 unsigned DwarfStackPtr =

2511 TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);

2515 }

2516 if (MBB.succ_empty() && MBB.isReturnBlock()) {

2517 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);

2522 --AfterPop;

2523 }

2525 }

2526 }

2527

2529

2530 while (MBBI != MBB.begin()) {

2532 unsigned Opc = PI->getOpcode();

2533

2534 if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {

2536 (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::BTR64ri8 &&

2537 Opc != X86::ADD64ri32 && Opc != X86::POPP64r && Opc != X86::POP2 &&

2538 Opc != X86::POP2P && Opc != X86::LEA64r))

2539 break;

2540 FirstCSPop = PI;

2541 }

2542

2544 }

2545 if (ArgBaseReg.isValid()) {

2546

2548 int FI = MI->getOperand(1).getIndex();

2549 unsigned MOVrm = Is64Bit ? X86::MOV64rm : X86::MOV32rm;

2550

2553 }

2554 MBBI = FirstCSPop;

2555

2556 if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)

2557 emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);

2558

2560 DL = MBBI->getDebugLoc();

2561

2562

2565

2566

2567

2568

2569

2571 !IsFunclet) {

2572 if (TRI->hasStackRealignment(MF))

2573 MBBI = FirstCSPop;

2576 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;

2577

2579 LEAAmount -= 16;

2580

2581

2582

2583

2584

2585

2586

2587

2588 if (LEAAmount != 0) {

2591 false, LEAAmount);

2593 } else {

2597 }

2598 } else if (NumBytes) {

2599

2601 if (!HasFP && NeedsDwarfCFI) {

2602

2605 nullptr, CSSize + TailCallArgReserveSize + SlotSize),

2607 }

2609 }

2610

2611 if (NeedsWin64CFI && MF.hasWinCFI())

2613

2614 if (!HasFP && NeedsDwarfCFI) {

2615 MBBI = FirstCSPop;

2617

2618

2619 while (MBBI != MBB.end()) {

2621 unsigned Opc = PI->getOpcode();

2623 if (Opc == X86::POP32r || Opc == X86::POP64r || Opc == X86::POPP64r ||

2624 Opc == X86::POP2 || Opc == X86::POP2P) {

2626

2627

2628 if (Opc == X86::POP2 || Opc == X86::POP2P)

2633 }

2634 }

2635 }

2636

2637

2638

2639

2640 if (NeedsDwarfCFI && MBB.succ_empty())

2642

2643 if (Terminator == MBB.end() || isTailCallOpcode(Terminator->getOpcode())) {

2644

2646 assert(Delta <= 0 && "TCDelta should never be positive");

2647 if (Delta) {

2648

2651 }

2652 }

2653

2654

2656 BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));

2657

2658 if (NeedsWin64CFI && MF.hasWinCFI())

2659 BuildMI(MBB, Terminator, DL, TII.get(X86::SEH_EndEpilogue));

2660}

2661

2663 int FI,

2666

2668

2669

2670

2671 if (TRI->hasBasePointer(MF))

2672 FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();

2673 else if (TRI->hasStackRealignment(MF))

2674 FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();

2675 else

2676 FrameReg = TRI->getFrameRegister(MF);

2677

2678

2679

2680

2681

2687 int64_t FPDelta = 0;

2688

2689

2690

2691

2692

2696 }

2697

2698 if (IsWin64Prologue) {

2700

2701

2703

2704

2707 uint64_t NumBytes = FrameSize - CSSize;

2708

2710 if (FI && FI == X86FI->getFAIndex())

2712

2713

2714

2715

2716

2717 FPDelta = FrameSize - SEHFrameOffset;

2719 "FPDelta isn't aligned per the Win64 ABI!");

2720 }

2721

2722 if (FrameReg == TRI->getFramePtr()) {

2723

2725

2726

2728

2729

2731 if (TailCallReturnAddrDelta < 0)

2732 Offset -= TailCallReturnAddrDelta;

2733

2735 }

2736

2737

2738

2739

2740 if (TRI->hasStackRealignment(MF) || TRI->hasBasePointer(MF))

2743}

2744

2750 const auto it = WinEHXMMSlotInfo.find(FI);

2751

2752 if (it == WinEHXMMSlotInfo.end())

2754

2755 FrameReg = TRI->getStackRegister();

2757 it->second;

2758}

2759

2763 int Adjustment) const {

2765 FrameReg = TRI->getStackRegister();

2768}

2769

2773 bool IgnoreSPUpdates) const {

2774

2776

2778

2779

2780

2781

2782

2783

2784

2785

2786

2787

2788

2789

2790

2791

2792

2793

2794

2795

2796

2797

2798

2799

2800

2801

2802

2803

2804

2805

2806

2807

2808

2809

2810

2812 STI.isTargetWin64())

2814

2815

2816

2817

2820

2821

2823 "we don't handle this case!");

2824

2825

2826

2827

2828

2829

2830

2831

2832

2833

2834

2835

2836

2837

2838

2839

2840

2841

2842

2843

2844

2845

2846

2847

2848

2849

2851}

2852

2855 std::vector &CSI) const {

2858

2859 unsigned CalleeSavedFrameSize = 0;

2860 unsigned XMMCalleeSavedFrameSize = 0;

2863

2865

2866 if (TailCallReturnAddrDelta < 0) {

2867

2868

2869

2870

2871

2872

2873

2874

2875

2877 TailCallReturnAddrDelta - SlotSize, true);

2878 }

2879

2880

2881 if (this->TRI->hasBasePointer(MF)) {

2882

2887 }

2888 }

2889

2890 if (hasFP(MF)) {

2891

2892 SpillSlotOffset -= SlotSize;

2894

2895

2896

2898 SpillSlotOffset -= SlotSize;

2900 SpillSlotOffset -= SlotSize;

2901 }

2902

2903

2904

2905

2907 for (unsigned i = 0; i < CSI.size(); ++i) {

2908 if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {

2909 CSI.erase(CSI.begin() + i);

2910 break;

2911 }

2912 }

2913 }

2914

2915

2916

2917

2918

2919

2920

2921

2922

2923

2924

2925 unsigned NumRegsForPush2 = 0;

2928 return X86::GR64RegClass.contains(I.getReg());

2929 });

2930 bool NeedPadding = (SpillSlotOffset % 16 != 0) && (NumCSGPR % 2 == 0);

2931 bool UsePush2Pop2 = NeedPadding ? NumCSGPR > 2 : NumCSGPR > 1;

2933 NumRegsForPush2 = UsePush2Pop2 ? alignDown(NumCSGPR, 2) : 0;

2935 SpillSlotOffset -= SlotSize;

2937 }

2938 }

2939

2940

2943

2944 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))

2945 continue;

2946

2947

2948

2950 (SpillSlotOffset % 16 == 0 ||

2953

2954 SpillSlotOffset -= SlotSize;

2955 CalleeSavedFrameSize += SlotSize;

2956

2959 }

2960

2961

2962

2964 SpillSlotOffset -= SlotSize;

2965 CalleeSavedFrameSize += SlotSize;

2966

2968

2970 }

2972 "Expect even candidates for push2/pop2");

2974 ++NumFunctionUsingPush2Pop2;

2977

2978

2981 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))

2982 continue;

2983

2984

2985 MVT VT = MVT::Other;

2986 if (X86::VK16RegClass.contains(Reg))

2987 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

2988

2990 unsigned Size = TRI->getSpillSize(*RC);

2991 Align Alignment = TRI->getSpillAlign(*RC);

2992

2993 assert(SpillSlotOffset < 0 && "SpillSlotOffset should always < 0 on X86");

2994 SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);

2995

2996

2997 SpillSlotOffset -= Size;

3001

3002

3003 if (X86::VR128RegClass.contains(Reg)) {

3004 WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;

3005 XMMCalleeSavedFrameSize += Size;

3006 }

3007 }

3008

3009 return true;

3010}

3011

3016

3017

3018

3019 if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())

3020 return true;

3021

3022

3026 assert(SlotSize == 8 && "Unexpected slot size for padding!");

3030 }

3031

3032

3033

3034 auto UpdateLiveInCheckCanKill = [&](Register Reg) {

3036

3037

3038

3039

3040

3041 if (MRI.isLiveIn(Reg))

3042 return false;

3043 MBB.addLiveIn(Reg);

3044

3046 if (MRI.isLiveIn(*AReg))

3047 return false;

3048 return true;

3049 };

3050 auto UpdateLiveInGetKillRegState = [&](Register Reg) {

3052 };

3053

3054 for (auto RI = CSI.rbegin(), RE = CSI.rend(); RI != RE; ++RI) {

3056 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))

3057 continue;

3058

3062 .addReg(Reg, UpdateLiveInGetKillRegState(Reg))

3063 .addReg(Reg2, UpdateLiveInGetKillRegState(Reg2))

3065 } else {

3067 .addReg(Reg, UpdateLiveInGetKillRegState(Reg))

3069 }

3070 }

3071

3073 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;

3074 Register BaseReg = this->TRI->getBaseRegister();

3078 }

3079

3080

3081

3084 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))

3085 continue;

3086

3087

3088 MVT VT = MVT::Other;

3089 if (X86::VK16RegClass.contains(Reg))

3090 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

3091

3092

3093 MBB.addLiveIn(Reg);

3095

3096 TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, Register(),

3098 }

3099

3100 return true;

3101}

3102

3106

3108 MBB.getParent()->getFunction().getPersonalityFn())) &&

3109 "SEH should not use CATCHRET");

3112

3113

3114 if (STI.is64Bit()) {

3115

3120 .addMBB(CatchRetTarget)

3122 } else {

3123

3125 .addMBB(CatchRetTarget);

3126 }

3127

3128

3129

3131}

3132

3136 if (CSI.empty())

3137 return false;

3138

3140

3141

3142 if (STI.is32Bit())

3143 return true;

3144

3145

3146 if (MI->getOpcode() == X86::CATCHRET) {

3147 const Function &F = MBB.getParent()->getFunction();

3150 if (IsSEH)

3151 return true;

3152 }

3153 }

3154

3156

3157

3160 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))

3161 continue;

3162

3163

3164 MVT VT = MVT::Other;

3165 if (X86::VK16RegClass.contains(Reg))

3166 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

3167

3169 TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, Register());

3170 }

3171

3172

3176 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;

3177 Register BaseReg = this->TRI->getBaseRegister();

3180 }

3181

3182

3183 for (auto I = CSI.begin(), E = CSI.end(); I != E; ++I) {

3185 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))

3186 continue;

3187

3192 else

3195 }

3198

3199 return true;

3200}

3201

3206

3207

3208 if (TRI->hasBasePointer(MF)) {

3209 Register BasePtr = TRI->getBaseRegister();

3210 if (STI.isTarget64BitILP32())

3212 SavedRegs.set(BasePtr);

3213 }

3214}

3215

3219 I++) {

3220 if (I->hasNestAttr() && I->use_empty())

3221 return true;

3222 }

3223 return false;

3224}

3225

3226

3227

3228

3229

3233

3234

3236 if (Is64Bit)

3237 return Primary ? X86::R14 : X86::R13;

3238 else

3239 return Primary ? X86::EBX : X86::EDI;

3240 }

3241

3242 if (Is64Bit) {

3243 if (IsLP64)

3244 return Primary ? X86::R11 : X86::R12;

3245 else

3246 return Primary ? X86::R11D : X86::R12D;

3247 }

3248

3250

3254 if (IsNested)

3255 report_fatal_error("Segmented stacks does not support fastcall with "

3256 "nested function.");

3257 return Primary ? X86::EAX : X86::ECX;

3258 }

3259 if (IsNested)

3260 return Primary ? X86::EDX : X86::EAX;

3261 return Primary ? X86::ECX : X86::EAX;

3262}

3263

3264

3265

3267

3272 unsigned TlsReg, TlsOffset;

3274

3275

3276

3277 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");

3278

3281 "Scratch register is live-in");

3282

3284 report_fatal_error("Segmented stacks do not support vararg functions.");

3285 if (STI.isTargetLinux() && STI.isTargetDarwin() && STI.isTargetWin32() &&

3286 STI.isTargetWin64() && STI.isTargetFreeBSD() &&

3287 STI.isTargetDragonFly())

3288 report_fatal_error("Segmented stacks not supported on this platform.");

3289

3290

3291

3292

3294

3296 return;

3297

3301 bool IsNested = false;

3302

3303

3306

3307

3308

3309

3310 for (const auto &LI : PrologueMBB.liveins()) {

3313 }

3314

3315 if (IsNested)

3317

3320

3321

3322

3324

3325

3327 if (STI.isTargetLinux()) {

3328 TlsReg = X86::FS;

3329 TlsOffset = IsLP64 ? 0x70 : 0x40;

3330 } else if (STI.isTargetDarwin()) {

3331 TlsReg = X86::GS;

3332 TlsOffset = 0x60 + 90 * 8;

3333 } else if (STI.isTargetWin64()) {

3334 TlsReg = X86::GS;

3335 TlsOffset = 0x28;

3336 } else if (STI.isTargetFreeBSD()) {

3337 TlsReg = X86::FS;

3338 TlsOffset = 0x18;

3339 } else if (STI.isTargetDragonFly()) {

3340 TlsReg = X86::FS;

3341 TlsOffset = 0x20;

3342 } else {

3343 report_fatal_error("Segmented stacks not supported on this platform.");

3344 }

3345

3346 if (CompareStackPointer)

3347 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;

3348 else

3349 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r),

3350 ScratchReg)

3356

3357 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm))

3364 } else {

3365 if (STI.isTargetLinux()) {

3366 TlsReg = X86::GS;

3367 TlsOffset = 0x30;

3368 } else if (STI.isTargetDarwin()) {

3369 TlsReg = X86::GS;

3370 TlsOffset = 0x48 + 90 * 4;

3371 } else if (STI.isTargetWin32()) {

3372 TlsReg = X86::FS;

3373 TlsOffset = 0x14;

3374 } else if (STI.isTargetDragonFly()) {

3375 TlsReg = X86::FS;

3376 TlsOffset = 0x10;

3377 } else if (STI.isTargetFreeBSD()) {

3378 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");

3379 } else {

3380 report_fatal_error("Segmented stacks not supported on this platform.");

3381 }

3382

3383 if (CompareStackPointer)

3384 ScratchReg = X86::ESP;

3385 else

3386 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg)

3392

3393 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||

3394 STI.isTargetDragonFly()) {

3395 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))

3402 } else if (STI.isTargetDarwin()) {

3403

3404

3405 unsigned ScratchReg2;

3406 bool SaveScratch2;

3407 if (CompareStackPointer) {

3408

3410 SaveScratch2 = false;

3411 } else {

3412

3414

3415

3416

3418 }

3419

3420

3422 "Scratch register is live-in and not saved");

3423

3424 if (SaveScratch2)

3425 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))

3427

3428 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)

3430 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))

3432 .addReg(ScratchReg2)

3437

3438 if (SaveScratch2)

3439 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);

3440 }

3441 }

3442

3443

3444

3446 .addMBB(&PrologueMBB)

3448

3449

3450

3452

3453

3454

3455 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;

3456 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;

3457 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;

3458 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;

3459

3460 if (IsNested)

3462

3467 Reg11)

3469 } else {

3470 BuildMI(allocMBB, DL, TII.get(X86::PUSH32i))

3473 }

3474

3475

3477

3478

3479

3480

3481

3482

3483

3484

3485

3486

3487

3488

3489

3490

3491

3492 if (STI.useIndirectThunkCalls())

3493 report_fatal_error("Emitting morestack calls on 64-bit with the large "

3494 "code model and thunks not yet implemented.");

3495 BuildMI(allocMBB, DL, TII.get(X86::CALL64m))

3501 } else {

3503 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))

3505 else

3506 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))

3508 }

3509

3510 if (IsNested)

3511 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));

3512 else

3513 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

3514

3516

3519

3520#ifdef EXPENSIVE_CHECKS

3522#endif

3523}

3524

3525

3526

3527

3528

3531 for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {

3533 if (Node->getNumOperands() != 2)

3534 continue;

3537 if (!NodeName || !NodeVal)

3538 continue;

3540 if (ValConst && NodeName->getString() == LiteralName) {

3542 }

3543 }

3544

3546 " required but not provided");

3547}

3548

3549

3550

3554 MBB.successors(),

3555 [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) &&

3557 return MI.isMetaInstruction();

3558 });

3559}

3560

3561

3562

3563

3564

3565

3566

3567

3568

3569

3570

3571

3572

3573

3574

3575

3580

3581

3582

3583 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");

3584

3585

3588 if (!HiPELiteralsMD)

3590 "Can't generate HiPE prologue without runtime parameters");

3592 HiPELiteralsMD, Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");

3593 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;

3594 const unsigned Guaranteed = HipeLeafWords * SlotSize;

3595 unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs

3597 : 0;

3599

3601 "HiPE prologue is only supported on Linux operating systems.");

3602

3603

3604

3605

3606

3607

3608

3609

3611 unsigned MoreStackForCalls = 0;

3612

3613 for (auto &MBB : MF) {

3614 for (auto &MI : MBB) {

3615 if (MI.isCall())

3616 continue;

3617

3618

3620

3621

3623 continue;

3624

3626 if (F)

3627 continue;

3628

3629

3630

3631

3632

3633

3634 if (F->getName().contains("erlang.") || F->getName().contains("bif_") ||

3636 continue;

3637

3638 unsigned CalleeStkArity = F->arg_size() > CCRegisteredArgs

3639 ? F->arg_size() - CCRegisteredArgs

3640 : 0;

3641 if (HipeLeafWords - 1 > CalleeStkArity)

3642 MoreStackForCalls =

3643 std::max(MoreStackForCalls,

3644 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);

3645 }

3646 }

3647 MaxStack += MoreStackForCalls;

3648 }

3649

3650

3651

3652 if (MaxStack > Guaranteed) {

3655

3656 for (const auto &LI : PrologueMBB.liveins()) {

3659 }

3660

3663

3664 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;

3665 unsigned LEAop, CMPop, CALLop;

3666 SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");

3668 SPReg = X86::RSP;

3669 PReg = X86::RBP;

3670 LEAop = X86::LEA64r;

3671 CMPop = X86::CMP64rm;

3672 CALLop = X86::CALL64pcrel32;

3673 } else {

3674 SPReg = X86::ESP;

3675 PReg = X86::EBP;

3676 LEAop = X86::LEA32r;

3677 CMPop = X86::CMP32rm;

3678 CALLop = X86::CALLpcrel32;

3679 }

3680

3683 "HiPE prologue scratch register is live-in");

3684

3685

3687 false, -MaxStack);

3688

3690 PReg, false, SPLimitOffset);

3691 BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1))

3692 .addMBB(&PrologueMBB)

3694

3695

3698 false, -MaxStack);

3700 PReg, false, SPLimitOffset);

3701 BuildMI(incStackMBB, DL, TII.get(X86::JCC_1))

3702 .addMBB(incStackMBB)

3704

3705 stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});

3706 stackCheckMBB->addSuccessor(incStackMBB, {1, 100});

3707 incStackMBB->addSuccessor(&PrologueMBB, {99, 100});

3708 incStackMBB->addSuccessor(incStackMBB, {1, 100});

3709 }

3710#ifdef EXPENSIVE_CHECKS

3712#endif

3713}

3714

3720 return false;

3721

3723 return false;

3724

3726

3727 if (NumPops != 1 && NumPops != 2)

3728 return false;

3729

3730

3731

3732 if (MBBI == MBB.begin())

3733 return false;

3735 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())

3736 return false;

3737

3738 unsigned Regs[2];

3739 unsigned FoundRegs = 0;

3740

3742 const MachineOperand &RegMask = Prev->getOperand(1);

3743

3744 auto &RegClass =

3745 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;

3746

3747 for (auto Candidate : RegClass) {

3748

3749

3750

3752 continue;

3753

3754

3755 if (MRI.isReserved(Candidate))

3756 continue;

3757

3758 bool IsDef = false;

3759 for (const MachineOperand &MO : Prev->implicit_operands()) {

3760 if (MO.isReg() && MO.isDef() &&

3761 TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {

3762 IsDef = true;

3763 break;

3764 }

3765 }

3766

3767 if (IsDef)

3768 continue;

3769

3770 Regs[FoundRegs++] = Candidate;

3771 if (FoundRegs == (unsigned)NumPops)

3772 break;

3773 }

3774

3775 if (FoundRegs == 0)

3776 return false;

3777

3778

3779 while (FoundRegs < (unsigned)NumPops)

3780 Regs[FoundRegs++] = Regs[0];

3781

3782 for (int i = 0; i < NumPops; ++i)

3784 Regs[i]);

3785

3786 return true;

3787}

3788

3793 unsigned Opcode = I->getOpcode();

3794 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();

3795 DebugLoc DL = I->getDebugLoc();

3797 uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;

3798 I = MBB.erase(I);

3800

3801

3802

3803

3805 return I;

3806

3807 if (!reserveCallFrame) {

3808

3809

3810

3811

3812

3813

3814

3816

3820

3821

3822

3823

3824

3825

3826

3827

3828 bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();

3829

3830 if (HasDwarfEHHandlers && !isDestroy &&

3834

3835 if (Amount == 0)

3836 return I;

3837

3838

3839

3840 Amount -= InternalAmt;

3841

3842

3843

3844

3845 if (isDestroy && InternalAmt && DwarfCFI && hasFP(MF))

3848

3849

3850 int64_t StackAdjustment = isDestroy ? Amount : -Amount;

3851 int64_t CfaAdjustment = StackAdjustment;

3852

3853 if (StackAdjustment) {

3854

3855

3856

3857

3860 CfaAdjustment += Offset;

3861 };

3862 auto CalcNewOffset = [&StackAdjustment](int64_t Offset) {

3863 return StackAdjustment + Offset;

3864 };

3865 StackAdjustment =

3866 mergeSPUpdates(MBB, InsertPos, CalcCfaAdjust, CalcNewOffset, true);

3867 StackAdjustment =

3868 mergeSPUpdates(MBB, InsertPos, CalcCfaAdjust, CalcNewOffset, false);

3869

3870 if (StackAdjustment) {

3871 if (!(F.hasMinSize() &&

3872 adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))

3873 BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,

3874 false);

3875 }

3876 }

3877

3879

3880

3881

3882

3883

3884

3885

3886

3887

3889 MBB, InsertPos, DL,

3891 }

3892

3893 return I;

3894 }

3895

3896 if (InternalAmt) {

3899 while (CI != B && !std::prev(CI)->isCall())

3900 --CI;

3901 BuildStackAdjustment(MBB, CI, DL, -InternalAmt, false);

3902 }

3903

3904 return I;

3905}

3906

3908 assert(MBB.getParent() && "Block is not attached to a function!");

3910 if (MBB.isLiveIn(X86::EFLAGS))

3911 return true;

3912

3913

3914

3917 if (TLI.hasInlineStackProbe(MF) || TLI.hasStackProbeSymbol(MF))

3918 return false;

3919

3922}

3923

3925 assert(MBB.getParent() && "Block is not attached to a function!");

3926

3927

3928

3929

3930

3931 if (STI.isTargetWin64() && MBB.succ_empty() && MBB.isReturnBlock())

3932 return false;

3933

3934

3935

3939

3941 return true;

3942

3943

3944

3945

3946

3948}

3949

3951

3952

3953 bool CompactUnwind =

3956 !CompactUnwind) &&

3957

3958

3959

3960

3961

3964}

3965

3968 const DebugLoc &DL, bool RestoreSP) const {

3969 assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");

3970 assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");

3972 "restoring EBP/ESI on non-32-bit target");

3973

3976 Register BasePtr = TRI->getBaseRegister();

3980

3981

3982

3984 int EHRegSize = MFI.getObjectSize(FI);

3985

3986 if (RestoreSP) {

3987

3989 X86::EBP, true, -EHRegSize)

3991 }

3992

3995 int EndOffset = -EHRegOffset - EHRegSize;

3997

3999

4007 assert(EndOffset >= 0 &&

4008 "end of registration object above normal EBP position!");

4009 } else if (UsedReg == BasePtr) {

4010

4014

4019 assert(UsedReg == BasePtr);

4021 UsedReg, true, Offset)

4023 } else {

4024 llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");

4025 }

4026 return MBBI;

4027}

4028

4030 return TRI->getSlotSize();

4031}

4032

4037

4045 FrameBase.Kind = DwarfFrameBase::CFA;

4048 return FrameBase;

4049 }

4050

4051 return DwarfFrameBase{DwarfFrameBase::Register, {FrameRegister}};

4052}

4053

4054namespace {

4055

4056struct X86FrameSortingObject {

4057 bool IsValid = false;

4058 unsigned ObjectIndex = 0;

4059 unsigned ObjectSize = 0;

4060 Align ObjectAlignment = Align(1);

4061 unsigned ObjectNumUses = 0;

4062};

4063

4064

4065

4066

4067

4068

4069

4070

4071

4072

4073

4074

4075

4076

4077struct X86FrameSortingComparator {

4078 inline bool operator()(const X86FrameSortingObject &A,

4079 const X86FrameSortingObject &B) const {

4080 uint64_t DensityAScaled, DensityBScaled;

4081

4082

4083

4084

4085 if (A.IsValid)

4086 return false;

4087 if (B.IsValid)

4088 return true;

4089

4090

4091

4092

4093

4094

4095

4096

4097

4098

4099

4100 DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *

4101 static_cast<uint64_t>(B.ObjectSize);

4102 DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *

4103 static_cast<uint64_t>(A.ObjectSize);

4104

4105

4106

4107

4108

4109

4110

4111

4112

4113 if (DensityAScaled == DensityBScaled)

4114 return A.ObjectAlignment < B.ObjectAlignment;

4115

4116 return DensityAScaled < DensityBScaled;

4117 }

4118};

4119}

4120

4121

4122

4123

4124

4128

4129

4130 if (ObjectsToAllocate.empty())

4131 return;

4132

4133

4134

4135

4136

4137

4138 std::vector SortingObjects(MFI.getObjectIndexEnd());

4139

4140

4141

4142 for (auto &Obj : ObjectsToAllocate) {

4143 SortingObjects[Obj].IsValid = true;

4144 SortingObjects[Obj].ObjectIndex = Obj;

4145 SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);

4146

4148 if (ObjectSize == 0)

4149

4150 SortingObjects[Obj].ObjectSize = 4;

4151 else

4152 SortingObjects[Obj].ObjectSize = ObjectSize;

4153 }

4154

4155

4156 for (auto &MBB : MF) {

4157 for (auto &MI : MBB) {

4158 if (MI.isDebugInstr())

4159 continue;

4161

4162 if (!MO.isFI())

4163 continue;

4164 int Index = MO.getIndex();

4165

4166

4168 SortingObjects[Index].IsValid)

4169 SortingObjects[Index].ObjectNumUses++;

4170 }

4171 }

4172 }

4173

4174

4175

4177

4178

4179

4180

4181

4182

4183 int i = 0;

4184 for (auto &Obj : SortingObjects) {

4185

4186 if (!Obj.IsValid)

4187 break;

4188 ObjectsToAllocate[i++] = Obj.ObjectIndex;

4189 }

4190

4191

4192 if (TRI->hasStackRealignment(MF) && hasFP(MF))

4193 std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());

4194}

4195

4196unsigned

4198

4199 unsigned Offset = 16;

4200

4202

4204

4205 Offset += getWinEHFuncletFrameSize(MF);

4207}

4208

4211

4212

4214

4216

4217

4220 RS->addScavengingFrameIndex(FI);

4221 }

4222

4223

4224

4227

4228

4229

4233 adjustFrameForMsvcCxxEh(MF);

4234 }

4235}

4236

4237void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {

4238

4239

4240

4241

4242

4245 int64_t MinFixedObjOffset = -SlotSize;

4247 MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));

4248

4251 int FrameIndex = H.CatchObj.FrameIndex;

4252 if ((FrameIndex != INT_MAX) && MFI.getObjectOffset(FrameIndex) == 0) {

4253

4255 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;

4256 MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);

4258 }

4259 }

4260 }

4261

4262

4263 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;

4264 int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;

4265 int UnwindHelpFI =

4268

4269

4270

4271 MachineBasicBlock &MBB = MF.front();

4275

4278 UnwindHelpFI)

4280}

4281

4285

4288

4289

4291 MI->eraseFromParent();

4293 }

4294}

4295

4298

4299

4300

4304 bool NeedsRestore = MBB.isEHPad() && MBB.isEHFuncletEntry();

4305 if (NeedsRestore)

4307 IsSEH);

4308 }

4309}

4310

4311

4312

4315 unsigned NumSpilledRegs) {

4317 unsigned AllocSize = TRI->getSpillSize(*RC) * NumSpilledRegs;

4319 unsigned AlignedSize = alignTo(AllocSize, StackAlign);

4320 return AlignedSize - AllocSize;

4321}

4322

4323void X86FrameLowering::spillFPBPUsingSP(MachineFunction &MF,

4326 int SPAdjust) const {

4328

4329 MachineBasicBlock *MBB = BeforeMI->getParent();

4330 DebugLoc DL = BeforeMI->getDebugLoc();

4331

4332

4333 if (FP.isValid()) {

4337 }

4338

4339

4344 }

4345

4346

4347 if (SPAdjust)

4349

4350

4351 if (FP.isValid() && needsDwarfCFI(MF)) {

4352

4353 unsigned CFIIndex =

4355 BuildMI(*MBB, BeforeMI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))

4357

4358

4359

4360 SmallString<64> CfaExpr;

4361 uint8_t buffer[16];

4362 int Offset = SPAdjust;

4364 Offset += TRI->getSpillSize(*TRI->getMinimalPhysRegClass(BP));

4365

4366

4367 if (TII.isFrameSetup(*BeforeMI)) {

4369 BeforeMI = std::next(BeforeMI);

4370 }

4372 if (STI.isTarget64BitILP32())

4374 unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackPtr, true);

4375 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfStackPtr));

4377 CfaExpr.push_back(dwarf::DW_OP_deref);

4378 CfaExpr.push_back(dwarf::DW_OP_consts);

4380 CfaExpr.push_back((uint8_t)dwarf::DW_OP_plus);

4381

4382 SmallString<64> DefCfaExpr;

4383 DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);

4385 DefCfaExpr.append(CfaExpr.str());

4389 }

4390}

4391

4392void X86FrameLowering::restoreFPBPUsingSP(MachineFunction &MF,

4395 int SPAdjust) const {

4397

4398

4399 MachineBasicBlock *MBB = AfterMI->getParent();

4401 DebugLoc DL = AfterMI->getDebugLoc();

4402 if (SPAdjust)

4404

4405

4409 }

4410

4411

4412 if (FP.isValid()) {

4415

4416

4417 if (needsDwarfCFI(MF)) {

4418

4419 unsigned CFIIndex =

4421 BuildMI(*MBB, Pos, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))

4423 }

4424 }

4425}

4426

4427void X86FrameLowering::saveAndRestoreFPBPUsingSP(

4430 assert(SpillFP || SpillBP);

4431

4433 const TargetRegisterClass *RC;

4434 unsigned NumRegs = 0;

4435

4436 if (SpillFP) {

4437 FP = TRI->getFrameRegister(MF);

4438 if (STI.isTarget64BitILP32())

4440 RC = TRI->getMinimalPhysRegClass(FP);

4441 ++NumRegs;

4442 }

4443 if (SpillBP) {

4444 BP = TRI->getBaseRegister();

4445 if (STI.isTarget64BitILP32())

4447 RC = TRI->getMinimalPhysRegClass(BP);

4448 ++NumRegs;

4449 }

4451

4452 spillFPBPUsingSP(MF, BeforeMI, FP, BP, SPAdjust);

4453 restoreFPBPUsingSP(MF, AfterMI, FP, BP, SPAdjust);

4454}

4455

4456bool X86FrameLowering::skipSpillFPBP(

4458 if (MI->getOpcode() == X86::LCMPXCHG16B_SAVE_RBX) {

4459

4460

4461

4462

4463

4464 int FI;

4466 while (!(MI->getOpcode() == TargetOpcode::COPY &&

4467 MI->getOperand(1).getReg() == X86::RBX) &&

4468 !((Reg = TII.isStoreToStackSlot(*MI, FI)) && Reg == X86::RBX))

4469 ++MI;

4470 return true;

4471 }

4472 return false;

4473}

4474

4477 bool &AccessBP) {

4478 AccessFP = AccessBP = false;

4479 if (FP) {

4480 if (MI.findRegisterUseOperandIdx(FP, TRI, false) != -1 ||

4481 MI.findRegisterDefOperandIdx(FP, TRI, false, true) != -1)

4482 AccessFP = true;

4483 }

4484 if (BP) {

4485 if (MI.findRegisterUseOperandIdx(BP, TRI, false) != -1 ||

4486 MI.findRegisterDefOperandIdx(BP, TRI, false, true) != -1)

4487 AccessBP = true;

4488 }

4489 return AccessFP || AccessBP;

4490}

4491

4492

4493

4494

4496 if (MI.isCall())

4497 return false;

4498 if (InsideEHLabels)

4499 return true;

4500

4502 if (MBB->hasEHPadSuccessor())

4503 return false;

4504

4505

4508 if (MBBI->isCall())

4509 return false;

4510 return true;

4511}

4512

4513

4514

4515void X86FrameLowering::checkInterferedAccess(

4518 bool SpillBP) const {

4519 if (DefMI == KillMI)

4520 return;

4521 if (TRI->hasBasePointer(MF)) {

4522 if (!SpillBP)

4523 return;

4524 } else {

4525 if (!SpillFP)

4526 return;

4527 }

4528

4529 auto MI = KillMI;

4532 [](const MachineOperand &MO) { return MO.isFI(); }))

4534 "Interference usage of base pointer/frame "

4535 "pointer.");

4536 MI++;

4537 }

4538}

4539

4540

4541

4542

4543

4544

4545

4546

4547

4548

4549

4550

4551

4552

4553

4554

4555

4556

4557

4558

4559

4560

4564 if (TFI.hasFP(MF))

4565 FP = TRI->getFrameRegister(MF);

4566 if (TRI->hasBasePointer(MF))

4567 BP = TRI->getBaseRegister();

4568

4569

4570

4574 FP = 0;

4576 BP = 0;

4577 }

4578 if (FP && !BP)

4579 return;

4580

4582 bool InsideEHLabels = false;

4583 auto MI = MBB.rbegin(), ME = MBB.rend();

4584 auto TermMI = MBB.getFirstTerminator();

4585 if (TermMI == MBB.begin())

4586 continue;

4587 MI = *(std::prev(TermMI));

4588

4589 while (MI != ME) {

4590

4591

4592

4595 isInvoke(*MI, InsideEHLabels) || skipSpillFPBP(MF, MI)) {

4596 ++MI;

4597 continue;

4598 }

4599

4600 if (MI->getOpcode() == TargetOpcode::EH_LABEL) {

4601 InsideEHLabels = !InsideEHLabels;

4602 ++MI;

4603 continue;

4604 }

4605

4606 bool AccessFP, AccessBP;

4607

4609 ++MI;

4610 continue;

4611 }

4612

4613

4614

4615 bool FPLive = false, BPLive = false;

4616 bool SpillFP = false, SpillBP = false;

4618 do {

4619 SpillFP |= AccessFP;

4620 SpillBP |= AccessBP;

4621

4622

4623 if (FPLive && MI->findRegisterDefOperandIdx(FP, TRI, false, true) != -1)

4624 FPLive = false;

4625 if (FP && MI->findRegisterUseOperandIdx(FP, TRI, false) != -1)

4626 FPLive = true;

4627 if (BPLive && MI->findRegisterDefOperandIdx(BP, TRI, false, true) != -1)

4628 BPLive = false;

4629 if (BP && MI->findRegisterUseOperandIdx(BP, TRI, false) != -1)

4630 BPLive = true;

4631

4633 } while ((MI != ME) &&

4634 (FPLive || BPLive ||

4636

4637

4638 if (FPLive && !SpillBP)

4639 continue;

4640

4641

4642

4643 if (KillMI->isCall() && DefMI != ME) {

4644 auto FrameSetup = std::next(DefMI);

4645

4646

4647

4648 while (FrameSetup != ME && TII.isFrameSetup(*FrameSetup) &&

4649 !FrameSetup->isCall())

4650 ++FrameSetup;

4651

4652

4653 if (FrameSetup != ME && TII.isFrameSetup(*FrameSetup) &&

4654 (TII.getFrameSize(*FrameSetup) ||

4655 TII.getFrameAdjustment(*FrameSetup))) {

4656 while (TII.isFrameInstr(*KillMI))

4657 --KillMI;

4658 DefMI = FrameSetup;

4660 ++MI;

4661 }

4662 }

4663

4664 checkInterferedAccess(MF, DefMI, KillMI, SpillFP, SpillBP);

4665

4666

4667 saveAndRestoreFPBPUsingSP(MF, &(*DefMI), &(*KillMI), SpillFP, SpillBP);

4668 }

4669 }

4670}

unsigned const MachineRegisterInfo * MRI

MachineInstrBuilder MachineInstrBuilder & DefMI

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

const TargetInstrInfo & TII

static const uint64_t kSplitStackAvailable

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

MachineBasicBlock MachineBasicBlock::iterator MBBI

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

Analysis containing CSE Info

Module.h This file contains the declarations for the Module class.

static cl::opt< int > PageSize("imp-null-check-page-size", cl::desc("The page size of the target in bytes"), cl::init(4096), cl::Hidden)

This file implements the LivePhysRegs utility for tracking liveness of physical registers.

static bool isTailCallOpcode(unsigned Opc)

Register const TargetRegisterInfo * TRI

Promote Memory to Register

static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)

static constexpr MCPhysReg FPReg

static constexpr MCPhysReg SPReg

This file declares the machine register scavenger class.

static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)

This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...

#define STATISTIC(VARNAME, DESC)

static bool is64Bit(const char *name)

static unsigned calculateSetFPREG(uint64_t SPAdjust)

Definition X86FrameLowering.cpp:1270

static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary)

GetScratchRegister - Get a temp register for performing work in the segmented stack and the Erlang/Hi...

Definition X86FrameLowering.cpp:3230

static unsigned getADDriOpcode(bool IsLP64)

Definition X86FrameLowering.cpp:114

static unsigned getPUSH2Opcode(const X86Subtarget &ST)

Definition X86FrameLowering.cpp:170

static unsigned getMOVriOpcode(bool Use64BitReg, int64_t Imm)

Definition X86FrameLowering.cpp:134

static unsigned getLEArOpcode(bool IsLP64)

Definition X86FrameLowering.cpp:130

static unsigned getSUBriOpcode(bool IsLP64)

Definition X86FrameLowering.cpp:110

static bool flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB)

Check if the flags need to be preserved before the terminators.

Definition X86FrameLowering.cpp:194

static bool isFPBPAccess(const MachineInstr &MI, Register FP, Register BP, const TargetRegisterInfo *TRI, bool &AccessFP, bool &AccessBP)

Definition X86FrameLowering.cpp:4475

static bool isOpcodeRep(unsigned Opcode)

Return true if an opcode is part of the REP group of instructions.

Definition X86FrameLowering.cpp:1475

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm)

Definition X86FrameLowering.cpp:126

static bool isEAXLiveIn(MachineBasicBlock &MBB)

Definition X86FrameLowering.cpp:177

static int computeFPBPAlignmentGap(MachineFunction &MF, const TargetRegisterClass *RC, unsigned NumSpilledRegs)

Definition X86FrameLowering.cpp:4313

static unsigned getADDrrOpcode(bool IsLP64)

Definition X86FrameLowering.cpp:122

static bool HasNestArgument(const MachineFunction *MF)

Definition X86FrameLowering.cpp:3216

static unsigned getPOPOpcode(const X86Subtarget &ST)

Definition X86FrameLowering.cpp:166

static bool isInvoke(const MachineInstr &MI, bool InsideEHLabels)

Definition X86FrameLowering.cpp:4495

static unsigned getPOP2Opcode(const X86Subtarget &ST)

Definition X86FrameLowering.cpp:173

static unsigned getHiPELiteral(NamedMDNode *HiPELiteralsMD, const StringRef LiteralName)

Lookup an ERTS parameter in the !hipe.literals named metadata node.

Definition X86FrameLowering.cpp:3529

static bool blockEndIsUnreachable(const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI)

Definition X86FrameLowering.cpp:3551

static unsigned getSUBrrOpcode(bool IsLP64)

Definition X86FrameLowering.cpp:118

static unsigned getPUSHOpcode(const X86Subtarget &ST)

Definition X86FrameLowering.cpp:162

constexpr uint64_t MaxSPChunk

Definition X86FrameLowering.cpp:228

static const unsigned FramePtr

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

reverse_iterator rend() const

bool empty() const

empty - Check if the array is empty.

reverse_iterator rbegin() const

iterator_range< const_set_bits_iterator > set_bits() const

static BranchProbability getOne()

static BranchProbability getZero()

The CalleeSavedInfo class tracks the information needed to locate where a callee saved register is in t...

This is the shared class of boolean and integer constants.

uint64_t getZExtValue() const

Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...

CallingConv::ID getCallingConv() const

getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...

bool hasPersonalityFn() const

Check whether this function has a personality function.

Constant * getPersonalityFn() const

Get the personality function associated with this function.

AttributeList getAttributes() const

Return the attribute list for this Function.

bool needsUnwindTableEntry() const

True if this function needs an unwind table.

const Argument * const_arg_iterator

bool isVarArg() const

isVarArg - Return true if this function takes a variable number of arguments.

bool hasFnAttribute(Attribute::AttrKind Kind) const

Return true if the function has the attribute.

Module * getParent()

Get the module that this global value is contained inside of...

bool usesWindowsCFI() const

static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register, SMLoc Loc={})

.cfi_def_cfa_register modifies a rule for computing CFA.

static MCCFIInstruction createGnuArgsSize(MCSymbol *L, int64_t Size, SMLoc Loc={})

A special wrapper for .cfi_escape that indicates GNU_ARGS_SIZE.

static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})

.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...

static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})

.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.

static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})

.cfi_offset Previous value of Register is saved at offset Offset from CFA.

static MCCFIInstruction createRememberState(MCSymbol *L, SMLoc Loc={})

.cfi_remember_state Save all current rules for all registers.

OpType getOperation() const

static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int64_t Offset, SMLoc Loc={})

.cfi_def_cfa_offset modifies a rule for computing CFA.

static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals, SMLoc Loc={}, StringRef Comment="")

.cfi_escape Allows the user to add arbitrary bytes to the unwind info.

static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int64_t Adjustment, SMLoc Loc={})

.cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but Offset is a relative value that is added/subt...

static MCCFIInstruction createRestoreState(MCSymbol *L, SMLoc Loc={})

.cfi_restore_state Restore the previously saved state.

const MCObjectFileInfo * getObjectFileInfo() const

const MCRegisterInfo * getRegisterInfo() const

LLVM_ABI void reportError(SMLoc L, const Twine &Msg)

MCSection * getCompactUnwindSection() const

MCRegAliasIterator enumerates all registers aliasing Reg.

MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...

Wrapper class representing physical registers. Should be passed by value.

LLVM_ABI StringRef getString() const

LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)

Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...

MachineInstrBundleIterator< const MachineInstr > const_iterator

iterator_range< livein_iterator > liveins() const

const BasicBlock * getBasicBlock() const

Return the LLVM basic block that this instance corresponded to originally.

LLVM_ABI LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI, MCRegister Reg, const_iterator Before, unsigned Neighborhood=10) const

Return whether (physical) register Reg has been defined and not killed as of just before Before.

LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())

Add Succ as a successor of this MachineBasicBlock.

LLVM_ABI iterator getFirstNonPHI()

Returns a pointer to the first instruction in this block that is not a PHINode instruction.

LLVM_ABI DebugLoc findDebugLoc(instr_iterator MBBI)

Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.

MachineInstrBundleIterator< MachineInstr, true > reverse_iterator

void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())

Adds the specified register as a live in.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

LLVM_ABI instr_iterator erase(instr_iterator I)

Remove an instruction from the instruction list and delete it.

void splice(iterator Where, MachineBasicBlock *Other, iterator From)

Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...

MachineInstrBundleIterator< MachineInstr > iterator

@ LQR_Live

Register is known to be (at least partially) live.

void setMachineBlockAddressTaken()

Set this block to indicate that its address is used as something other than the target of a terminato...

LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const

Return true if the specified register is in the live in set.

The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.

bool needsSplitStackProlog() const

Return true if this function requires a split stack prolog, even if it uses no stack space.

LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)

Create a new object at a fixed location on the stack.

bool hasVarSizedObjects() const

This method may be called any time after instruction selection is complete to determine if the stack ...

uint64_t getStackSize() const

Return the number of bytes that must be allocated to hold all of the fixed size frame objects.

bool adjustsStack() const

Return true if this function adjusts the stack – e.g., when calling another function.

LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)

Create a new statically sized stack object, returning a nonnegative identifier to represent it.

LLVM_ABI void ensureMaxAlignment(Align Alignment)

Make sure the function is at least Align bytes aligned.

bool hasCalls() const

Return true if the current function has any function calls.

bool isFrameAddressTaken() const

This method may be called any time after instruction selection is complete to determine if there is a...

Align getMaxAlign() const

Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...

void setObjectOffset(int ObjectIdx, int64_t SPOffset)

Set the stack frame offset of the specified object.

uint64_t getMaxCallFrameSize() const

Return the maximum size of a call frame that must be allocated for an outgoing function call.

bool hasPatchPoint() const

This method may be called any time after instruction selection is complete to determine if there is a...

bool hasOpaqueSPAdjustment() const

Returns true if the function contains opaque dynamic stack adjustments.

void setCVBytesOfCalleeSavedRegisters(unsigned S)

LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)

Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...

LLVM_ABI uint64_t estimateStackSize(const MachineFunction &MF) const

Estimate and return the size of the stack frame.

Align getObjectAlign(int ObjectIdx) const

Return the alignment of the specified stack object.

int64_t getObjectSize(int ObjectIdx) const

Return the size of the specified object.

bool hasStackMap() const

This method may be called any time after instruction selection is complete to determine if there is a...

const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const

Returns a reference to call saved info vector for the current function.

int getObjectIndexEnd() const

Return one past the maximum frame object index.

bool hasCopyImplyingStackAdjustment() const

Returns true if the function contains operations which will lower down to instructions which manipula...

bool hasStackObjects() const

Return true if there are any stack objects in this function.

LLVM_ABI int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset, bool IsImmutable=false)

Create a spill slot at a fixed location on the stack.

int64_t getObjectOffset(int ObjectIdx) const

Return the assigned stack offset of the specified object from the incoming stack pointer.

void setStackSize(uint64_t Size)

Set the size of the stack.

bool isFixedObjectIndex(int ObjectIdx) const

Returns true if the specified index corresponds to a fixed stack object.

int getObjectIndexBegin() const

Return the minimum frame object index.

void setOffsetAdjustment(int64_t Adj)

Set the correction for frame offsets.

const WinEHFuncInfo * getWinEHFuncInfo() const

getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.

unsigned addFrameInst(const MCCFIInstruction &Inst)

void setHasWinCFI(bool v)

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

const std::vector< MCCFIInstruction > & getFrameInstructions() const

Returns a reference to a list of cfi instructions in the function's prologue.

bool hasInlineAsm() const

Returns true if the function contains any inline assembly.

void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)

Create a substitution between one <instr,operand> value to a different, new value.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.

bool needsFrameMoves() const

True if this function needs frame moves for debug or exceptions.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

bool callsUnwindInit() const

void push_front(MachineBasicBlock *MBB)

const char * createExternalSymbolName(StringRef Name)

Allocate a string and populate it with the given external symbol name.

MCContext & getContext() const

bool callsEHReturn() const

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const

Run the current MachineFunction through the machine code verifier, useful for debugger use.

Function & getFunction()

Return the LLVM function that this machine code represents.

const std::vector< LandingPadInfo > & getLandingPads() const

Return a reference to the landing pad info for the current function.

BasicBlockListType::iterator iterator

bool shouldSplitStack() const

Should we be emitting segmented stack stuff for the function.

Ty * getInfo()

getInfo - Keep track of various per-function pieces of information for backends that would like to do...

const MachineBasicBlock & front() const

bool hasEHFunclets() const

MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)

CreateMachineInstr - Allocate a new MachineInstr.

void insert(iterator MBBI, MachineBasicBlock *MBB)

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with

const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const

const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const

const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const

Add a new virtual register operand.

const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const

const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register use operand.

const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const

Representation of each machine instruction.

unsigned getNumOperands() const

Returns the total number of operands.

const DebugLoc & getDebugLoc() const

Returns the debug location id of this MachineInstr.

LLVM_ABI unsigned getDebugInstrNum()

Fetch the instruction number of this MachineInstr.

const MachineOperand & getOperand(unsigned i) const

@ MOVolatile

The memory access is volatile.

@ MOLoad

The memory access reads data.

@ MOStore

The memory access writes data.

MachineOperand class - Representation of each machine instruction operand.

const GlobalValue * getGlobal() const

MachineBasicBlock * getMBB() const

void setIsDead(bool Val=true)

bool isGlobal() const

isGlobal - Tests if this is a MO_GlobalAddress operand.

static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)

clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

bool isReserved(MCRegister PhysReg) const

isReserved - Returns true when PhysReg is a reserved register.

LLVM_ABI bool isLiveIn(Register Reg) const

NamedMDNode * getNamedMetadata(StringRef Name) const

Return the first NamedMDNode in the module with the specified name.

unsigned getCodeViewFlag() const

Returns the CodeView Version by checking module flags.

MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...

LLVM_ABI MDNode * getOperand(unsigned i) const

LLVM_ABI unsigned getNumOperands() const

Wrapper class representing virtual and physical registers.

constexpr bool isValid() const

SlotIndex - An opaque wrapper around machine indexes.

SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...

void append(StringRef RHS)

Append from a StringRef.

StringRef str() const

Explicit conversion to StringRef.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

StackOffset holds a fixed and a scalable offset in bytes.

int64_t getFixed() const

Returns the fixed component of the stack.

static StackOffset getFixed(int64_t Fixed)

StringRef - Represent a constant reference to a string, i.e.

static constexpr size_t npos

unsigned getStackAlignment() const

getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...

bool hasFP(const MachineFunction &MF) const

hasFP - Return true if the specified function should have a dedicated frame pointer register.

virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const

This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...

int getOffsetOfLocalArea() const

getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...

TargetFrameLowering(StackDirection D, Align StackAl, int LAO, Align TransAl=Align(1), bool StackReal=true)

Align getStackAlign() const

getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...

const Triple & getTargetTriple() const

CodeModel::Model getCodeModel() const

Returns the code model.

const MCAsmInfo * getMCAsmInfo() const

Return target specific asm information.

SwiftAsyncFramePointerMode SwiftAsyncFramePointer

Control when and how the Swift async frame pointer bit should be set.

LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const

DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

virtual Register getFrameRegister(const MachineFunction &MF) const =0

Debug information queries.

virtual const TargetFrameLowering * getFrameLowering() const

virtual const TargetRegisterInfo * getRegisterInfo() const =0

Return the target's register information.

bool isUEFI() const

Tests whether the OS is UEFI.

bool isOSWindows() const

Tests whether the OS is Windows.

bool has128ByteRedZone(const MachineFunction &MF) const

Return true if the function has a redzone (accessible bytes past the frame of the top of stack functi...

Definition X86FrameLowering.cpp:1453

void spillFPBP(MachineFunction &MF) const override

If a function uses base pointer and the base pointer is clobbered by inline asm, RA doesn't detect th...

Definition X86FrameLowering.cpp:4561

bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override

canSimplifyCallFramePseudos - If there is a reserved call frame, the call frame pseudos can be simpli...

Definition X86FrameLowering.cpp:74

bool needsFrameIndexResolution(const MachineFunction &MF) const override

Definition X86FrameLowering.cpp:89

X86FrameLowering(const X86Subtarget &STI, MaybeAlign StackAlignOverride)

Definition X86FrameLowering.cpp:49

const X86RegisterInfo * TRI

void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override

Definition X86FrameLowering.cpp:2407

bool hasFPImpl(const MachineFunction &MF) const override

hasFPImpl - Return true if the specified function should have a dedicated frame pointer register.

Definition X86FrameLowering.cpp:98

MachineBasicBlock::iterator restoreWin32EHStackPointers(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool RestoreSP=false) const

Sets up EBP and optionally ESI based on the incoming EBP value.

Definition X86FrameLowering.cpp:3966

int getInitialCFAOffset(const MachineFunction &MF) const override

Return initial CFA offset value i.e.

Definition X86FrameLowering.cpp:4029

bool canUseAsPrologue(const MachineBasicBlock &MBB) const override

Check whether or not the given MBB can be used as a prologue for the target.

Definition X86FrameLowering.cpp:3907

bool hasReservedCallFrame(const MachineFunction &MF) const override

hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...

Definition X86FrameLowering.cpp:64

void emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, std::optional< MachineFunction::DebugInstrOperandPair > InstrNum=std::nullopt) const

Emit target stack probe code.

Definition X86FrameLowering.cpp:638

void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override

processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...

Definition X86FrameLowering.cpp:4209

void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool IsPrologue) const

Definition X86FrameLowering.cpp:517

void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const override

This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...

Definition X86FrameLowering.cpp:3202

int64_t mergeSPAdd(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, int64_t AddOffset, bool doMergeWithPrevious) const

Equivalent to: mergeSPUpdates(MBB, MBBI, [AddOffset](int64_t Offset) { return AddOffset + Offset; }...

Definition X86FrameLowering.cpp:470

StackOffset getFrameIndexReferenceSP(const MachineFunction &MF, int FI, Register &SPReg, int Adjustment) const

Definition X86FrameLowering.cpp:2761

bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override

assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.

Definition X86FrameLowering.cpp:2853

bool enableShrinkWrapping(const MachineFunction &MF) const override

Returns true if the target will correctly handle shrink wrapping.

Definition X86FrameLowering.cpp:3950

StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override

getFrameIndexReference - This method should return the base register and offset used to reference a f...

Definition X86FrameLowering.cpp:2662

void inlineStackProbe(MachineFunction &MF, MachineBasicBlock &PrologMBB) const override

Replace a StackProbe inline-stub with the actual probe code inline.

Definition X86FrameLowering.cpp:659

bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override

restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...

Definition X86FrameLowering.cpp:3133

MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override

This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...

Definition X86FrameLowering.cpp:3789

void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &DL, int64_t NumBytes, bool InEpilogue) const

Emit a series of instructions to increment / decrement the stack pointer by a constant value.

Definition X86FrameLowering.cpp:232

bool canUseAsEpilogue(const MachineBasicBlock &MBB) const override

Check whether or not the given MBB can be used as a epilogue for the target.

Definition X86FrameLowering.cpp:3924

bool Is64Bit

Is64Bit implies that x86_64 instructions are available.

Register getInitialCFARegister(const MachineFunction &MF) const override

Return initial CFA register value i.e.

Definition X86FrameLowering.cpp:4034

bool Uses64BitFramePtr

True if the 64-bit frame or stack pointer should be used.

unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override

Definition X86FrameLowering.cpp:4197

void adjustForSegmentedStacks(MachineFunction &MF, MachineBasicBlock &PrologueMBB) const override

Adjust the prologue to have the function use segmented stacks.

Definition X86FrameLowering.cpp:3268

DwarfFrameBase getDwarfFrameBase(const MachineFunction &MF) const override

Return the frame base information to be encoded in the DWARF subprogram debug info.

Definition X86FrameLowering.cpp:4039

void emitCalleeSavedFrameMovesFullCFA(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const override

Emits Dwarf Info specifying offsets of callee saved registers and frame pointer.

Definition X86FrameLowering.cpp:497

int getWin64EHFrameIndexRef(const MachineFunction &MF, int FI, Register &SPReg) const

Definition X86FrameLowering.cpp:2745

bool canUseLEAForSPInEpilogue(const MachineFunction &MF) const

Check that LEA can be used on SP in an epilogue sequence for MF.

Definition X86FrameLowering.cpp:2322

bool stackProbeFunctionModifiesSP() const override

Does the stack probe function call return with a modified stack pointer?

Definition X86FrameLowering.cpp:655

void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override

Order the symbols in the local stack.

Definition X86FrameLowering.cpp:4125

void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const MCCFIInstruction &CFIInst, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const

Wraps up getting a CFI index and building a MachineInstr for it.

Definition X86FrameLowering.cpp:479

void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override

emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.

Definition X86FrameLowering.cpp:1587

void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override

processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...

Definition X86FrameLowering.cpp:4282

StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override

Same as getFrameIndexReference, except that the stack pointer (as opposed to the frame pointer) will ...

Definition X86FrameLowering.cpp:2771

void restoreWinEHStackPointersInParent(MachineFunction &MF) const

Definition X86FrameLowering.cpp:4296

bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override

spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...

Definition X86FrameLowering.cpp:3012

void adjustForHiPEPrologue(MachineFunction &MF, MachineBasicBlock &PrologueMBB) const override

Erlang programs may need a special prologue to handle the stack size they might need at runtime.

Definition X86FrameLowering.cpp:3576

X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...

bool getForceFramePointer() const

void setPadForPush2Pop2(bool V)

bool isCandidateForPush2Pop2(Register Reg) const

unsigned getArgumentStackSize() const

bool getFPClobberedByCall() const

int getRestoreBasePointerOffset() const

int getSEHFramePtrSaveIndex() const

bool hasCFIAdjustCfa() const

int getTCReturnAddrDelta() const

void setRestoreBasePointer(const MachineFunction *MF)

bool getHasSEHFramePtrSave() const

DenseMap< int, unsigned > & getWinEHXMMSlotInfo()

bool getBPClobberedByCall() const

void setUsesRedZone(bool V)

bool hasPreallocatedCall() const

bool hasSwiftAsyncContext() const

void setHasSEHFramePtrSave(bool V)

bool getRestoreBasePointer() const

MachineInstr * getStackPtrSaveMI() const

size_t getNumCandidatesForPush2Pop2() const

AMXProgModelEnum getAMXProgModel() const

void addCandidateForPush2Pop2(Register Reg)

unsigned getCalleeSavedFrameSize() const

bool getHasPushSequences() const

bool padForPush2Pop2() const

void setStackPtrSaveMI(MachineInstr *MI)

bool getUsesRedZone() const

void setCalleeSavedFrameSize(unsigned bytes)

void setSEHFramePtrSaveIndex(int Index)

const X86TargetLowering * getTargetLowering() const override

bool isTargetWindowsCoreCLR() const

self_iterator getIterator()

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Align[]

Key for Kernel::Arg::Metadata::mAlign.

uint16_t StackAdjustment(const RuntimeFunction &RF)

StackAdjustment - calculated stack adjustment in words.

unsigned ID

LLVM IR allows to use arbitrary numbers as calling convention identifiers.

@ HiPE

Used by the High-Performance Erlang Compiler (HiPE).

@ X86_INTR

x86 hardware interrupt context.

@ Fast

Attempts to make calls as fast as possible (e.g.

@ Tail

Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...

@ X86_FastCall

'fast' analog of X86_StdCall.

@ BasicBlock

Various leaf nodes.

@ Implicit

Not emitted register (e.g. carry, or temporary result).

@ Define

Register definition.

@ Kill

The last use of a register.

@ Undef

Value of the register doesn't matter.

@ MO_GOTPCREL

MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...

This is an optimization pass for GlobalISel generic memory operations.

void stable_sort(R &&Range)

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)

Builder interface. Specify how to create the initial instruction itself.

constexpr bool isInt(int64_t x)

Checks if an integer fits into the given bit width.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

bool isAligned(Align Lhs, uint64_t SizeInBytes)

Checks that SizeInBytes is a multiple of the alignment.

MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)

@ DwarfCFI

DWARF-like instruction based exceptions.

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)

addFrameReference - This function is used to add a reference to the base of an abstract object on the...

constexpr T alignDown(U Value, V Align, W Skew=0)

Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.

IterT skipDebugInstructionsForward(IterT It, IterT End, bool SkipPseudoOp=true)

Increment It until it points to a non-debug instruction or to End and return the resulting iterator.

auto dyn_cast_or_null(const Y &Val)

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

static bool isFuncletReturnInstr(const MachineInstr &MI)

auto reverse(ContainerTy &&C)

@ Always

Always set the bit.

@ Never

Never set the bit.

@ DeploymentBased

Determine whether to set the bit statically or dynamically based on the deployment target.

LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)

constexpr bool isUInt(uint64_t x)

Checks if an unsigned integer fits into the given bit width.

LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)

See if the given exception handling personality function is one that we understand.

IterT skipDebugInstructionsBackward(IterT It, IterT Begin, bool SkipPseudoOp=true)

Decrement It until it points to a non-debug instruction or to Begin and return the resulting iterator...

unsigned getUndefRegState(bool B)

unsigned getDefRegState(bool B)

unsigned getKillRegState(bool B)

uint64_t alignTo(uint64_t Size, Align A)

Returns a multiple of A needed to store Size bytes.

bool isAsynchronousEHPersonality(EHPersonality Pers)

Returns true if this personality function catches asynchronous exceptions.

unsigned encodeSLEB128(int64_t Value, raw_ostream &OS, unsigned PadTo=0)

Utility function to encode a SLEB128 value to an output stream.

auto count_if(R &&Range, UnaryPredicate P)

Wrapper function around std::count_if to count the number of times an element satisfying a given pred...

auto find_if(R &&Range, UnaryPredicate P)

Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.

void computeAndAddLiveIns(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB)

Convenience function combining computeLiveIns() and addLiveIns().

unsigned encodeULEB128(uint64_t Value, raw_ostream &OS, unsigned PadTo=0)

Utility function to encode a ULEB128 value to an output stream.

static const MachineInstrBuilder & addRegOffset(const MachineInstrBuilder &MIB, Register Reg, bool isKill, int Offset)

addRegOffset - This function is used to add a memory reference of the form [Reg + Offset],...

void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)

Convenience function for recomputing live-in's for a set of MBBs until the computation converges.

This struct is a compact representation of a valid (non-zero power of two) alignment.

constexpr uint64_t value() const

This is a hole in the type system and should not be abused.

Pair of physical register and lane mask.

This class contains a discriminated union of information about pointers in memory operands,...

static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

union llvm::TargetFrameLowering::DwarfFrameBase::@004076321055032247336074224075335064105264310375 Location

enum llvm::TargetFrameLowering::DwarfFrameBase::FrameBaseKind Kind

SmallVector< WinEHTryBlockMapEntry, 4 > TryBlockMap

SmallVector< WinEHHandlerType, 1 > HandlerArray