LLVM: lib/Target/Hexagon/HexagonFrameLowering.cpp Source File

//===-- HexagonFrameLowering.cpp - Define frame lowering -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Standard library headers used by the code below.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <map>
#include <new>
#include <optional>
#include <vector>

#define DEBUG_TYPE "hexagon-pei"


using namespace llvm;

// Command-line options controlling Hexagon frame lowering.
static cl::opt<bool> DisableDeallocRet("disable-hexagon-dealloc-ret",
    cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"));

static cl::opt<unsigned> NumberScavengerSlots("number-scavenger-slots",
    cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2));

static cl::opt<int> SpillFuncThreshold("spill-func-threshold", cl::Hidden,
    cl::desc("Specify O2(not Os) spill func threshold"), cl::init(6));

static cl::opt<int> SpillFuncThresholdOs("spill-func-threshold-Os", cl::Hidden,
    cl::desc("Specify Os spill func threshold"), cl::init(1));

static cl::opt<bool> EnableStackOVFSanitizer(
    "enable-stackovf-sanitizer", cl::Hidden,
    cl::desc("Enable runtime checks for stack overflow."), cl::init(false));

static cl::opt<bool> EnableShrinkWrapping("hexagon-shrink-frame",
    cl::init(true), cl::Hidden, cl::desc("Enable stack frame shrink wrapping"));

static cl::opt<unsigned> ShrinkLimit("shrink-frame-limit",
    cl::init(std::numeric_limits<unsigned>::max()), cl::Hidden,
    cl::desc("Max count of stack frame shrink-wraps"));

static cl::opt<bool> EnableSaveRestoreLong("enable-save-restore-long",
    cl::Hidden, cl::desc("Enable long calls for save-restore stubs."),
    cl::init(false));

static cl::opt<bool> EliminateFramePointer("hexagon-fp-elim", cl::init(true),
    cl::Hidden, cl::desc("Refrain from using FP whenever possible"));

static cl::opt<bool> OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden,
    cl::init(true), cl::desc("Optimize spill slots"));

#ifndef NDEBUG
static cl::opt<unsigned> SpillOptMax("spill-opt-max", cl::Hidden,
    cl::init(std::numeric_limits<unsigned>::max()));
static unsigned SpillOptCount = 0;
#endif

198

namespace llvm {

  void initializeHexagonCallFrameInformationPass(PassRegistry&);
  FunctionPass *createHexagonCallFrameInformation();

} // end namespace llvm

namespace {

  // Late pass that emits CFI instructions for the function once frame
  // lowering has run.
  class HexagonCallFrameInformation : public MachineFunctionPass {
  public:
    static char ID;

    HexagonCallFrameInformation() : MachineFunctionPass(ID) {
      PassRegistry &PR = *PassRegistry::getPassRegistry();
      initializeHexagonCallFrameInformationPass(PR);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }
  };

  char HexagonCallFrameInformation::ID = 0;

} // end anonymous namespace

bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
  auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering();
  bool NeedCFI = MF.needsFrameMoves();

  if (!NeedCFI)
    return false;
  HFI.insertCFIInstructions(MF);
  return true;
}

INITIALIZE_PASS(HexagonCallFrameInformation, "hexagon-cfi",
                "Hexagon call frame information", false, false)

FunctionPass *llvm::createHexagonCallFrameInformation() {
  return new HexagonCallFrameInformation();
}

245

246

247
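// Map a register pair Reg to the subregister that has the greater "number",
// i.e. the higher-numbered 32-bit half when 'hireg' is true, and the
// lower-numbered one otherwise. Registers outside the D0..D15 range are
// returned unchanged.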

250 bool hireg = true) {

251 if (Reg < Hexagon::D0 || Reg > Hexagon::D15)

252 return Reg;

253

256 if (hireg) {

259 } else {

260 if (!RegNo || SubReg < RegNo)

262 }

263 }

264 return RegNo;

265}

266

267

270 static_assert(Hexagon::R1 > 0,

271 "Assume physical registers are encoded as positive integers");

273 return 0;

274

276 for (unsigned I = 1, E = CSI.size(); I < E; ++I) {

278 if (Reg > Max)

279 Max = Reg;

280 }

281 return Max;

282}

283

284

285
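// Checks if the basic block contains any instruction that needs a stack
// frame to be already in place: a call, PS_alloca/PS_aligna, a frame-index
// operand, a virtual register, a reference to a callee-saved register, or a
// regmask that does not preserve some callee-saved register.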

289 if (MI.isCall())

290 return true;

291 unsigned Opc = MI.getOpcode();

292 switch (Opc) {

293 case Hexagon::PS_alloca:

294 case Hexagon::PS_aligna:

295 return true;

296 default:

297 break;

298 }

299

301

302

303

304

305

306 if (MO.isFI())

307 return true;

308 if (MO.isReg()) {

310

311 if (!R)

312 continue;

313

314

315 if (R.isVirtual())

316 return true;

317 for (MCPhysReg S : HRI.subregs_inclusive(R))

318 if (CSR[S])

319 return true;

320 continue;

321 }

322 if (MO.isRegMask()) {

323

324

325

326

327 const uint32_t *BM = MO.getRegMask();

329 unsigned R = x;

330

331 if (!(BM[R/32] & (1u << (R%32))))

332 return true;

333 }

334 }

335 }

336 }

337 return false;

338}

339

/// Returns true if MBB has a machine instruction that indicates a tail call
/// in the block.
static bool hasTailCall(const MachineBasicBlock &MBB) {
  MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;
  unsigned RetOpc = I->getOpcode();
  return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;
}

/// Returns true if MBB contains an instruction that returns.
static bool hasReturn(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators())
    if (MI.isReturn())
      return true;
  return false;
}

/// Returns the "return" instruction from this block, or nullptr if there
/// isn't any.
static MachineInstr *getReturn(MachineBasicBlock &MBB) {
  for (auto &I : MBB)
    if (I.isReturn())
      return &I;
  return nullptr;
}

static bool isRestoreCall(unsigned Opc) {
  switch (Opc) {
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:
    return true;
  }
  return false;
}

381
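// Small predicates over the function's optimization attributes (opt-none,
// opt-size, min-size). They are consulted by the frame-lowering decisions
// below, e.g. when choosing spill-function thresholds.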

385}

386

389 return F.hasOptSize() && F.hasMinSize();

390}

391

394}

395

396

397

398

399
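// Shrink-wrapping: try to find a prolog/epilog placement other than the
// function entry and exit blocks. The candidate prolog block must dominate,
// and the candidate epilog block post-dominate, every block that needs a
// stack frame; the transformation is gated by EnableShrinkWrapping and
// limited by ShrinkLimit.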

400void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,

402 static unsigned ShrinkCounter = 0;

403

406 return;

409 return;

410 ShrinkCounter++;

411 }

412

414

419

422

423 UnsignedMap RPO;

424 RPOTType RPOT(&MF);

425 unsigned RPON = 0;

426 for (auto &I : RPOT)

427 RPO[I->getNumber()] = RPON++;

428

429

430

431

432 for (auto &I : MF) {

433 unsigned BN = RPO[I.getNumber()];

435

436 if (RPO[Succ->getNumber()] <= BN)

437 return;

438 }

439

440

441

443 BitVector CSR(Hexagon::NUM_TARGET_REGS);

444 for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)

445 for (MCPhysReg S : HRI.subregs_inclusive(*P))

446 CSR[S] = true;

447

448 for (auto &I : MF)

451

453 dbgs() << "Blocks needing SF: {";

454 for (auto &B : SFBlocks)

456 dbgs() << " }\n";

457 });

458

459 if (SFBlocks.empty())

460 return;

461

462

464 for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {

466 if (!DomB)

467 break;

468 }

470 for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {

472 if (!PDomB)

473 break;

474 }

476 dbgs() << "Computed dom block: ";

477 if (DomB)

479 else

480 dbgs() << "";

481 dbgs() << ", computed pdom block: ";

482 if (PDomB)

484 else

485 dbgs() << "";

486 dbgs() << "\n";

487 });

488 if (!DomB || !PDomB)

489 return;

490

491

492 if (!MDT.dominates(DomB, PDomB)) {

493 LLVM_DEBUG(dbgs() << "Dom block does not dominate pdom block\n");

494 return;

495 }

496 if (!MPT.dominates(PDomB, DomB)) {

497 LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");

498 return;

499 }

500

501

502 PrologB = DomB;

503 EpilogB = PDomB;

504}

505

506

507

508

509

510

514

517

520 findShrunkPrologEpilog(MF, PrologB, EpilogB);

521

522 bool PrologueStubs = false;

523 insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);

524 insertPrologueInBlock(*PrologB, PrologueStubs);

525 updateEntryPaths(MF, *PrologB);

526

527 if (EpilogB) {

528 insertCSRRestoresInBlock(*EpilogB, CSI, HRI);

529 insertEpilogueInBlock(*EpilogB);

530 } else {

531 for (auto &B : MF)

532 if (B.isReturnBlock())

533 insertCSRRestoresInBlock(B, CSI, HRI);

534

535 for (auto &B : MF)

536 if (B.isReturnBlock())

537 insertEpilogueInBlock(B);

538

539 for (auto &B : MF) {

540 if (B.empty())

541 continue;

544 continue;

545 for (auto &R : CSI)

547 }

548 }

549

550 if (EpilogB) {

551

552

553

555 BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);

556 updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);

557 }

558}

559

560

561

565 assert(F.hasFnAttribute(Attribute::NoReturn) &&

566 F.getFunction().hasFnAttribute(Attribute::NoUnwind) &&

567 F.getFunction().hasFnAttribute(Attribute::UWTable));

568 (void)F;

569

570

572}

573

574

575

576

577

578

583 assert(!MFI.hasVarSizedObjects() &&

584 !HST.getRegisterInfo()->hasStackRealignment(MF));

585 return F.hasFnAttribute(Attribute::NoReturn) &&

586 F.hasFnAttribute(Attribute::NoUnwind) &&

587 F.hasFnAttribute(Attribute::UWTable) && HST.noreturnStackElim() &&

588 MFI.getStackSize() == 0;

589}

590

592 bool PrologueStubs) const {

597 auto &HRI = *HST.getRegisterInfo();

598

600

601

602

604

607

608 FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);

610

612

613

615 Register SP = HRI.getStackRegister();

618

620 for (auto &MBB : MF)

621 for (auto &MI : MBB)

622 if (MI.getOpcode() == Hexagon::PS_alloca)

624

625 for (auto *MI : AdjustRegs) {

626 assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");

627 expandAlloca(MI, HII, SP, MaxCF);

628 MI->eraseFromParent();

629 }

630

632

633 if (MF.getFunction().isVarArg() &&

635

637 int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0)

638 ? NumVarArgRegs * 4

639 : NumVarArgRegs * 4 + 4;

640 if (RegisterSavedAreaSizePlusPadding > 0) {

641

642

643 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

645 .addImm(-RegisterSavedAreaSizePlusPadding)

647

648 int NumBytes = 0;

649

651 for (int i = HMFI.getFirstNamedArgFrameIndex(),

652 e = HMFI.getLastNamedArgFrameIndex(); i >= e; --i) {

655

656

657 unsigned LDOpc, STOpc;

659

660

661 if (ObjAlign > ObjSize) {

662 if (ObjSize <= 1)

663 OpcodeChecker = 1;

664 else if (ObjSize <= 2)

665 OpcodeChecker = 2;

666 else if (ObjSize <= 4)

667 OpcodeChecker = 4;

668 else if (ObjSize > 4)

669 OpcodeChecker = 8;

670 }

671

672 switch (OpcodeChecker) {

673 case 1:

674 LDOpc = Hexagon::L2_loadrb_io;

675 STOpc = Hexagon::S2_storerb_io;

676 break;

677 case 2:

678 LDOpc = Hexagon::L2_loadrh_io;

679 STOpc = Hexagon::S2_storerh_io;

680 break;

681 case 4:

682 LDOpc = Hexagon::L2_loadri_io;

683 STOpc = Hexagon::S2_storeri_io;

684 break;

685 case 8:

686 default:

687 LDOpc = Hexagon::L2_loadrd_io;

688 STOpc = Hexagon::S2_storerd_io;

689 break;

690 }

691

692 Register RegUsed = LDOpc == Hexagon::L2_loadrd_io ? Hexagon::D3

693 : Hexagon::R6;

694 int LoadStoreCount = ObjSize / OpcodeChecker;

695

696 if (ObjSize % OpcodeChecker)

697 ++LoadStoreCount;

698

699

700

701

702

703 if (NumBytes != 0)

704 NumBytes = alignTo(NumBytes, ObjAlign);

705

706 int Count = 0;

707 while (Count < LoadStoreCount) {

708

709 BuildMI(MBB, InsertPt, dl, HII.get(LDOpc), RegUsed)

711 .addImm(RegisterSavedAreaSizePlusPadding +

712 ObjAlign.value() * Count + NumBytes)

714

715

716 BuildMI(MBB, InsertPt, dl, HII.get(STOpc))

718 .addImm(ObjAlign.value() * Count + NumBytes)

721

722 Count++;

723 }

725 }

726

727

728 NumBytes = alignTo(NumBytes, 8);

729

730

731

732

733 NumBytes = (NumVarArgRegs % 2 == 0) ? NumBytes : NumBytes + 4;

734

736 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_storeri_io))

738 .addImm(NumBytes + 4 * i)

739 .addReg(Hexagon::R0 + j)

741 }

742 }

743 }

744

746 insertAllocframe(MBB, InsertPt, NumBytes);

747 if (AlignStack) {

748 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)

751 }

752

753

754

756 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))

758 } else if (NumBytes > 0) {

760 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

762 .addImm(-int(NumBytes));

763 }

764}

765

766void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {

770 auto &HRI = *HST.getRegisterInfo();

771 Register SP = HRI.getStackRegister();

772

775

776 if (hasFP(MF)) {

781

783 int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?

784 (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);

785 NumBytes += RegisterSavedAreaSizePlusPadding;

786 }

787 if (NumBytes) {

788 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

791 }

792 return;

793 }

794

796 unsigned RetOpc = RetI ? RetI->getOpcode() : 0;

797

798

799 if (RetOpc == Hexagon::EH_RETURN_JMPR) {

800 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))

801 .addDef(Hexagon::D15)

802 .addReg(Hexagon::R30);

803 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)

805 .addReg(Hexagon::R28);

806 return;

807 }

808

809

810

811 if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||

812 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||

813 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||

814 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {

816 ++It;

817

818 while (It != MBB.end()) {

819 if (!It->isLabel())

821 else

822 ++It;

823 }

824 return;

825 }

826

827

828

829

830 bool NeedsDeallocframe = true;

833 unsigned COpc = PrevIt->getOpcode();

834 if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||

835 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||

836 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||

837 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||

838 COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)

839 NeedsDeallocframe = false;

840 }

841

844 if (!NeedsDeallocframe)

845 return;

846

847

848

850 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))

851 .addDef(Hexagon::D15)

852 .addReg(Hexagon::R30);

853 return;

854 }

855 unsigned NewOpc = Hexagon::L4_return;

857 .addDef(Hexagon::D15)

858 .addReg(Hexagon::R30);

859

862 } else {

863

864

866 int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?

867 (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);

868

871 : std::prev(Term);

873 (I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT &&

874 I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC &&

875 I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 &&

876 I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC))

877 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))

878 .addDef(Hexagon::D15)

879 .addReg(Hexagon::R30);

880 if (RegisterSavedAreaSizePlusPadding != 0)

881 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

883 .addImm(RegisterSavedAreaSizePlusPadding);

884 }

885}

886

892 auto &HRI = *HST.getRegisterInfo();

893

894

895

896 const unsigned int ALLOCFRAME_MAX = 16384;

897

898

899

902

904 Register SP = HRI.getStackRegister();

905

906 if (NumBytes >= ALLOCFRAME_MAX) {

907

908 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))

914

915

916 Register SP = HRI.getStackRegister();

917 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

919 .addImm(-int(NumBytes))

921 } else {

922 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))

928 }

929}

930

931void HexagonFrameLowering::updateEntryPaths(MachineFunction &MF,

934

937

938 unsigned SaveN = SaveB.getNumber();

940

941 for (unsigned i = 0; i < Worklist.size(); ++i) {

942 unsigned BN = Worklist[i];

944 for (auto &R : CSI)

947 if (BN != SaveN)

949 Worklist.insert(SB->getNumber());

950 }

951}

952

958 if (Path[BN] || DoneF[BN])

959 return false;

960 if (DoneT[BN])

961 return true;

962

964

965 Path[BN] = true;

966 bool ReachedExit = false;

968 ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);

969

971

972

973

976 for (auto &R : CSI)

978 ReachedExit = true;

979 }

980

981

982

983

984 if (ReachedExit && &MBB != &RestoreB) {

985 for (auto &R : CSI)

988 DoneT[BN] = true;

989 }

990 if (!ReachedExit)

991 DoneF[BN] = true;

992

993 Path[BN] = false;

994 return ReachedExit;

995}

996
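// Find the location in the basic block where CFI instructions should be
// inserted: right after the S2_allocframe, or after the bundle containing
// it (unless the bundle also contains a call, in which case the CFI goes
// before the bundle's end). Returns std::nullopt if the block contains no
// allocframe.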

997static std::optionalMachineBasicBlock::iterator

999

1000

1001

1002

1003

1004 auto End = B.instr_end();

1005

1008 if (I.isBundle()) {

1009 if (I.getOpcode() == Hexagon::S2_allocframe)

1010 return std::next(It);

1011 continue;

1012 }

1013

1014 bool HasCall = false, HasAllocFrame = false;

1016 while (++T != End && T->isBundled()) {

1017 if (T->getOpcode() == Hexagon::S2_allocframe)

1018 HasAllocFrame = true;

1019 else if (T->isCall())

1020 HasCall = true;

1021 }

1022 if (HasAllocFrame)

1023 return HasCall ? It : std::next(It);

1024 }

1025 return std::nullopt;

1026}

1027

1029 for (auto &B : MF)

1031 insertCFIInstructionsAt(B, *At);

1032}

1033

1040 auto &HRI = *HST.getRegisterInfo();

1041

1042

1043

1044

1046 const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);

1047

1049 bool HasFP = hasFP(MF);

1050

1051 if (HasFP) {

1052 unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);

1053 unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);

1054

1055

1056

1057

1058

1059

1060

1061

1062

1063

1064

1065

1069

1073

1077 }

1078

1079 static Register RegsToMove[] = {

1080 Hexagon::R1, Hexagon::R0, Hexagon::R3, Hexagon::R2,

1081 Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,

1082 Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,

1083 Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,

1084 Hexagon::D0, Hexagon::D1, Hexagon::D8, Hexagon::D9,

1085 Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13,

1086 Hexagon::NoRegister

1087 };

1088

1090

1091 for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {

1094 return C.getReg() == Reg;

1095 };

1097 if (F == CSI.end())

1098 continue;

1099

1101 if (HasFP) {

1102

1103

1104

1105

1106

1107

1108

1110 } else {

1114 }

1115

1117

1118 if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {

1119 unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);

1124 } else {

1125

1126

1127

1128

1129

1130

1131 Register HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);

1132 Register LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);

1133 unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);

1134 unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);

1143 }

1144 }

1145}

1146

1150 bool HasExtraAlign = HRI.hasStackRealignment(MF);

1152

1153

1154

1155

1156

1157

1159 return true;

1160

1161

1162

1163

1164

1165

1166 if (HasAlloca || HasExtraAlign)

1167 return true;

1168

1170

1173 return true;

1175 return true;

1176 }

1177

1180 return true;

1181

1182 return false;

1183}

1184
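// Returns the name of the out-of-line save/restore runtime stub to call for
// the given spill kind, based on the largest callee-saved register that must
// be handled. For example, if the highest callee-saved register is R21, the
// prologue stub is "__save_r16_through_r21" (or its "_stkchk" variant when
// stack-overflow checking is enabled).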

static const char *getSpillFunctionFor(Register MaxReg, SpillKind SpillType,
      bool Stkchk = false) {
  const char * V4SpillToMemoryFunctions[] = {
    "__save_r16_through_r17",
    "__save_r16_through_r19",
    "__save_r16_through_r21",
    "__save_r16_through_r23",
    "__save_r16_through_r25",
    "__save_r16_through_r27" };

  const char * V4SpillToMemoryStkchkFunctions[] = {
    "__save_r16_through_r17_stkchk",
    "__save_r16_through_r19_stkchk",
    "__save_r16_through_r21_stkchk",
    "__save_r16_through_r23_stkchk",
    "__save_r16_through_r25_stkchk",
    "__save_r16_through_r27_stkchk" };

  const char * V4SpillFromMemoryFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe",
    "__restore_r16_through_r19_and_deallocframe",
    "__restore_r16_through_r21_and_deallocframe",
    "__restore_r16_through_r23_and_deallocframe",
    "__restore_r16_through_r25_and_deallocframe",
    "__restore_r16_through_r27_and_deallocframe" };

  const char * V4SpillFromMemoryTailcallFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe_before_tailcall",
    "__restore_r16_through_r19_and_deallocframe_before_tailcall",
    "__restore_r16_through_r21_and_deallocframe_before_tailcall",
    "__restore_r16_through_r23_and_deallocframe_before_tailcall",
    "__restore_r16_through_r25_and_deallocframe_before_tailcall",
    "__restore_r16_through_r27_and_deallocframe_before_tailcall"
  };

  const char **SpillFunc = nullptr;

  switch (SpillType) {
  case SK_ToMem:
    SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
                       : V4SpillToMemoryFunctions;
    break;
  case SK_FromMem:
    SpillFunc = V4SpillFromMemoryFunctions;
    break;
  case SK_FromMemTailcall:
    SpillFunc = V4SpillFromMemoryTailcallFunctions;
    break;
  }
  assert(SpillFunc && "Unknown spill kind");

  // Spill all callee-saved registers up to the one with the largest id.
  switch (MaxReg) {
  case Hexagon::R17:
    return SpillFunc[0];
  case Hexagon::R19:
    return SpillFunc[1];
  case Hexagon::R21:
    return SpillFunc[2];
  case Hexagon::R23:
    return SpillFunc[3];
  case Hexagon::R25:
    return SpillFunc[4];
  case Hexagon::R27:
    return SpillFunc[5];
  default:
    llvm_unreachable("Unhandled maximum callee save register");
  }
  return nullptr;
}

1261

1267

1270 bool HasExtraAlign = HRI.hasStackRealignment(MF);

1272

1275 Register SP = HRI.getStackRegister();

1276 Register FP = HRI.getFrameRegister();

1277 Register AP = HMFI.getStackAlignBaseReg();

1278

1279

1280

1281

1282

1283

1284

1285

1286

1287

1288

1289

1290

1291

1292 bool UseFP = false, UseAP = false;

1293

1294

1295

1296

1297 if (NoOpt && !HasExtraAlign)

1298 UseFP = true;

1300

1301

1302 UseFP |= (HasAlloca || HasExtraAlign);

1303 } else {

1304 if (HasAlloca) {

1305 if (HasExtraAlign)

1306 UseAP = true;

1307 else

1308 UseFP = true;

1309 }

1310 }

1311

1312

1313 bool HasFP = hasFP(MF);

1314 assert((HasFP || !UseFP) && "This function must have frame pointer");

1315

1316

1317

1318

1319

1320

1321

1322

1323

1324

1325

1326

1327

1328

1329

1330

1331

1332

1333

1334

1335

1336

1337

1338

1339

1340 if (Offset > 0 && !HasFP)

1342

1343 if (UseFP)

1344 FrameReg = FP;

1345 else if (UseAP)

1346 FrameReg = AP;

1347 else

1348 FrameReg = SP;

1349

1350

1351

1352

1353

1354 int RealOffset = Offset;

1355 if (!UseFP && !UseAP)

1356 RealOffset = FrameSize+Offset;

1358}

1359

1362 bool &PrologueStubs) const {

1363 if (CSI.empty())

1364 return true;

1365

1367 PrologueStubs = false;

1371

1372 if (useSpillFunction(MF, CSI)) {

1373 PrologueStubs = true;

1377 StkOvrFlowEnabled);

1381

1382

1384 unsigned SpillOpc;

1385 if (StkOvrFlowEnabled) {

1386 if (LongCalls)

1387 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC

1388 : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;

1389 else

1390 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC

1391 : Hexagon::SAVE_REGISTERS_CALL_V4STK;

1392 } else {

1393 if (LongCalls)

1394 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC

1395 : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;

1396 else

1397 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC

1398 : Hexagon::SAVE_REGISTERS_CALL_V4;

1399 }

1400

1404

1405

1406 addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);

1407

1410 return true;

1411 }

1412

1415

1416

1417

1419 int FI = I.getFrameIdx();

1421 HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI, Register());

1422 if (IsKill)

1424 }

1425 return true;

1426}

1427

1430 if (CSI.empty())

1431 return false;

1432

1437

1438 if (useRestoreFunction(MF, CSI)) {

1444 bool IsPIC = HTM.isPositionIndependent();

1446

1447

1451

1452 if (HasTC) {

1453 unsigned RetOpc;

1454 if (LongCalls)

1455 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC

1456 : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;

1457 else

1458 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC

1459 : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;

1462 } else {

1463

1465 assert(It->isReturn() && std::next(It) == MBB.end());

1466 unsigned RetOpc;

1467 if (LongCalls)

1468 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC

1469 : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;

1470 else

1471 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC

1472 : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;

1473 DeallocCall = BuildMI(MBB, It, DL, HII.get(RetOpc))

1475

1477 }

1478 addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);

1479 return true;

1480 }

1481

1485 int FI = I.getFrameIdx();

1486 HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI, Register());

1487 }

1488

1489 return true;

1490}

1491
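// Called during prolog/epilog code insertion to eliminate the call frame
// setup and destroy pseudo instructions (ADJCALLSTACKDOWN / ADJCALLSTACKUP);
// on Hexagon they are simply erased.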

1496 unsigned Opc = MI.getOpcode();

1497 (void)Opc;

1498 assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&

1499 "Cannot handle this call frame pseudo instruction");

1501}

1502

1505

1506

1507

1508

1512

1513 if (!HasAlloca || !NeedsAlign)

1514 return;

1515

1516

1519 AP = AI->getOperand(0).getReg();

1522 HMFI.setStackAlignBaseReg(AP);

1523}

1524

1525

1529

1530 auto IsUsed = [&HRI,&MRI] (Register Reg) -> bool {

1532 if (MRI.isPhysRegUsed(*AI))

1533 return true;

1534 return false;

1535 };

1536

1537

1538

1540 if (!IsUsed(*P))

1541 return false;

1542

1543

1544 return true;

1545}

1546

1547#ifndef NDEBUG

1549 dbgs() << '{';

1553 }

1554 dbgs() << " }";

1555}

1556#endif

1557

1562 BitVector SRegs(Hexagon::NUM_TARGET_REGS);

1563

1564

1565

1566

1567

1568

1569

1574 for (MCPhysReg SR : TRI->subregs_inclusive(R))

1575 SRegs[SR] = true;

1576 }

1579 dbgs() << "\n");

1580

1581

1582

1584

1585

1590

1592 bool HasResSub = false;

1595 continue;

1596 HasResSub = true;

1597 break;

1598 }

1599 if (!HasResSub)

1601 }

1602 }

1603

1604 for (int x = Reserved.find_first(); x >= 0; x = Reserved.find_next(x)) {

1606 for (MCPhysReg SR : TRI->superregs_inclusive(R))

1607 SRegs[SR] = false;

1608 }

1610 dbgs() << "\n");

1612 dbgs() << "\n");

1613

1614

1615

1616

1617

1618 BitVector TmpSup(Hexagon::NUM_TARGET_REGS);

1622 TmpSup[SR] = true;

1623 }

1624 for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {

1626 for (MCPhysReg SR : TRI->subregs_inclusive(R)) {

1628 continue;

1629 TmpSup[R] = false;

1630 break;

1631 }

1632 }

1634 dbgs() << "\n");

1635

1636

1637 SRegs |= TmpSup;

1639 dbgs() << "\n");

1640

1641

1642

1646 if (!SRegs[SR])

1647 continue;

1648 SRegs[R] = false;

1649 break;

1650 }

1651 }

1653 dbgs() << "\n");

1654

1655

1656

1657 CSI.clear();

1658

1660

1661 unsigned NumFixed;

1662 int64_t MinOffset = 0;

1664 for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {

1665 if (!SRegs[S->Reg])

1666 continue;

1669 MinOffset = std::min(MinOffset, S->Offset);

1671 SRegs[S->Reg] = false;

1672 }

1673

1674

1675

1676

1680 unsigned Size = TRI->getSpillSize(*RC);

1681 int64_t Off = MinOffset - Size;

1683 Off &= -Alignment.value();

1685 MinOffset = std::min(MinOffset, Off);

1687 SRegs[R] = false;

1688 }

1689

1691 dbgs() << "CS information: {";

1693 int FI = I.getFrameIdx();

1695 dbgs() << ' ' << printReg(I.getReg(), TRI) << ":fi#" << FI << ":sp";

1696 if (Off >= 0)

1697 dbgs() << '+';

1698 dbgs() << Off;

1699 }

1700 dbgs() << " }\n";

1701 });

1702

1703#ifndef NDEBUG

1704

1705 bool MissedReg = false;

1709 MissedReg = true;

1710 }

1711 if (MissedReg)

1712 llvm_unreachable("...there are unhandled callee-saved registers!");

1713#endif

1714

1715 return true;

1716}

1717

1723 Register DstR = MI->getOperand(0).getReg();

1724 Register SrcR = MI->getOperand(1).getReg();

1725 if (!Hexagon::ModRegsRegClass.contains(DstR) ||

1726 !Hexagon::ModRegsRegClass.contains(SrcR))

1727 return false;

1728

1729 Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);

1730 BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1));

1731 BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)

1733

1735 B.erase(It);

1736 return true;

1737}

1738

1743 if (MI->getOperand(0).isFI())

1744 return false;

1745

1747 unsigned Opc = MI->getOpcode();

1748 Register SrcR = MI->getOperand(2).getReg();

1749 bool IsKill = MI->getOperand(2).isKill();

1750 int FI = MI->getOperand(0).getIndex();

1751

1752

1753

1754 Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);

1755 unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr

1756 : Hexagon::A2_tfrcrr;

1757 BuildMI(B, It, DL, HII.get(TfrOpc), TmpR)

1759

1760

1761 BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))

1766

1768 B.erase(It);

1769 return true;

1770}

1771

1776 if (MI->getOperand(1).isFI())

1777 return false;

1778

1780 unsigned Opc = MI->getOpcode();

1781 Register DstR = MI->getOperand(0).getReg();

1782 int FI = MI->getOperand(1).getIndex();

1783

1784

1785 Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);

1786 BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)

1790

1791

1792

1793 unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp

1794 : Hexagon::A2_tfrrcr;

1795 BuildMI(B, It, DL, HII.get(TfrOpc), DstR)

1797

1799 B.erase(It);

1800 return true;

1801}

1802

1807 if (MI->getOperand(0).isFI())

1808 return false;

1809

1811 Register SrcR = MI->getOperand(2).getReg();

1812 bool IsKill = MI->getOperand(2).isKill();

1813 int FI = MI->getOperand(0).getIndex();

1814 auto *RC = &Hexagon::HvxVRRegClass;

1815

1816

1817

1818

1819

1820 Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);

1821 Register TmpR1 = MRI.createVirtualRegister(RC);

1822

1823 BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)

1824 .addImm(0x01010101);

1825

1826 BuildMI(B, It, DL, HII.get(Hexagon::V6_vandqrt), TmpR1)

1829

1830 auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();

1832 expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);

1833

1836 B.erase(It);

1837 return true;

1838}

1839

1844 if (MI->getOperand(1).isFI())

1845 return false;

1846

1848 Register DstR = MI->getOperand(0).getReg();

1849 int FI = MI->getOperand(1).getIndex();

1850 auto *RC = &Hexagon::HvxVRRegClass;

1851

1852

1853

1854

1855 Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);

1856 Register TmpR1 = MRI.createVirtualRegister(RC);

1857

1858 BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)

1859 .addImm(0x01010101);

1863 expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);

1864

1865 BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)

1868

1871 B.erase(It);

1872 return true;

1873}

1874

1882 if (MI->getOperand(0).isFI())

1883 return false;

1884

1885

1886

1887

1888

1890 LPR.addLiveIns(B);

1892 for (auto R = B.begin(); R != It; ++R) {

1893 Clobbers.clear();

1894 LPR.stepForward(*R, Clobbers);

1895 }

1896

1898 Register SrcR = MI->getOperand(2).getReg();

1899 Register SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);

1900 Register SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);

1901 bool IsKill = MI->getOperand(2).isKill();

1902 int FI = MI->getOperand(0).getIndex();

1903

1904 unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);

1905 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);

1907 unsigned StoreOpc;

1908

1909

1910 if (LPR.contains(SrcLo)) {

1911 StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai

1912 : Hexagon::V6_vS32Ub_ai;

1913 BuildMI(B, It, DL, HII.get(StoreOpc))

1918 }

1919

1920

1921 if (LPR.contains(SrcHi)) {

1922 StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai

1923 : Hexagon::V6_vS32Ub_ai;

1924 BuildMI(B, It, DL, HII.get(StoreOpc))

1929 }

1930

1931 B.erase(It);

1932 return true;

1933}

1934

1942 if (MI->getOperand(1).isFI())

1943 return false;

1944

1946 Register DstR = MI->getOperand(0).getReg();

1947 Register DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);

1948 Register DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);

1949 int FI = MI->getOperand(1).getIndex();

1950

1951 unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);

1952 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);

1954 unsigned LoadOpc;

1955

1956

1957 LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai

1958 : Hexagon::V6_vL32Ub_ai;

1959 BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)

1963

1964

1965 LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai

1966 : Hexagon::V6_vL32Ub_ai;

1967 BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)

1971

1972 B.erase(It);

1973 return true;

1974}

1975

1982 if (MI->getOperand(0).isFI())

1983 return false;

1984

1987 Register SrcR = MI->getOperand(2).getReg();

1988 bool IsKill = MI->getOperand(2).isKill();

1989 int FI = MI->getOperand(0).getIndex();

1990

1991 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);

1993 unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai

1994 : Hexagon::V6_vS32Ub_ai;

1995 BuildMI(B, It, DL, HII.get(StoreOpc))

2000

2001 B.erase(It);

2002 return true;

2003}

2004

2011 if (MI->getOperand(1).isFI())

2012 return false;

2013

2016 Register DstR = MI->getOperand(0).getReg();

2017 int FI = MI->getOperand(1).getIndex();

2018

2019 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);

2021 unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai

2022 : Hexagon::V6_vL32Ub_ai;

2023 BuildMI(B, It, DL, HII.get(LoadOpc), DstR)

2027

2028 B.erase(It);

2029 return true;

2030}

2031
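// Expand the spill/reload pseudo instructions (predicate, control-register
// and HVX spills, plus COPY between modifier registers) into real
// instruction sequences. Virtual registers created in the process are
// collected in NewRegs so that scavenging spill slots can be reserved for
// them later in determineCalleeSaves.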

bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
      SmallVectorImpl<Register> &NewRegs) const {
  auto &HII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool Changed = false;

  for (auto &B : MF) {
    // Traverse the basic blocks, expanding spill pseudos as they are found.
    MachineBasicBlock::iterator NextI;
    for (auto I = B.begin(), E = B.end(); I != E; I = NextI) {
      MachineInstr *MI = &*I;
      NextI = std::next(I);
      unsigned Opc = MI->getOpcode();

      switch (Opc) {
      case TargetOpcode::COPY:
        Changed |= expandCopy(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::STriw_pred:
      case Hexagon::STriw_ctr:
        Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::LDriw_pred:
      case Hexagon::LDriw_ctr:
        Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::PS_vstorerq_ai:
        Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::PS_vloadrq_ai:
        Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::PS_vloadrw_ai:
        Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::PS_vstorerw_ai:
        Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
        break;
      }
    }
  }

  return Changed;
}

2076

2081

2082 SavedRegs.resize(HRI.getNumRegs());

2083

2084

2085

2088 SavedRegs.set(*R);

2089

2090

2092 expandSpillMacros(MF, NewRegs);

2094 optimizeSpillSlots(MF, NewRegs);

2095

2096

2097

2098 if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {

2102

2103

2104 SpillRCs.insert(&Hexagon::IntRegsRegClass);

2105

2107 SpillRCs.insert(MRI.getRegClass(VR));

2108

2109 for (const auto *RC : SpillRCs) {

2111 continue;

2112 unsigned Num = 1;

2113 switch (RC->getID()) {

2114 case Hexagon::IntRegsRegClassID:

2116 break;

2117 case Hexagon::HvxQRRegClassID:

2118 Num = 2;

2119 break;

2120 }

2121 unsigned S = HRI.getSpillSize(*RC);

2122 Align A = HRI.getSpillAlign(*RC);

2123 for (unsigned i = 0; i < Num; i++) {

2126 }

2127 }

2128 }

2129

2131}

2132

2140

2141 auto isDead = [&FIR,&DeadMap] (Register Reg) -> bool {

2142 auto F = DeadMap.find({Reg,0});

2143 if (F == DeadMap.end())

2144 return false;

2145 for (auto &DR : F->second)

2146 if (DR.contains(FIR))

2147 return true;

2148 return false;

2149 };

2150

2152 bool Dead = true;

2155 continue;

2156 Dead = false;

2157 break;

2158 }

2159 if (Dead)

2160 return Reg;

2161 }

2162 return 0;

2163}

2164

2165void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,

2168 auto &HII = *HST.getInstrInfo();

2169 auto &HRI = *HST.getRegisterInfo();

2172

2173 using BlockIndexMap =

2174 std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>;

2175 using BlockRangeMap =

2176 std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>;

2178

2179 struct SlotInfo {

2180 BlockRangeMap Map;

2181 unsigned Size = 0;

2183

2184 SlotInfo() = default;

2185 };

2186

2187 BlockIndexMap BlockIndexes;

2189 std::map<int,SlotInfo> FIRangeMap;

2190

2191

2192

2193

2194

2195 auto getCommonRC =

2198 if (HaveRC == nullptr || HaveRC == NewRC)

2199 return NewRC;

2200

2202 return HaveRC;

2203 if (NewRC->hasSubClassEq(HaveRC))

2204 return NewRC;

2205 return nullptr;

2206 };

2207

2208

2209

2210 for (auto &B : MF) {

2211 std::map<int,IndexType> LastStore, LastLoad;

2212

2213

2214 auto P = BlockIndexes.insert(

2216 auto &IndexMap = P.first->second;

2218 << IndexMap << '\n');

2219

2220 for (auto &In : B) {

2221 int LFI, SFI;

2222 bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);

2223 bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);

2224 if (Load && Store) {

2225

2228 continue;

2229 }

2230

2231

2232

2233

2234

2235

2236 if (Load || Store) {

2237 int TFI = Load ? LFI : SFI;

2238 unsigned AM = HII.getAddrMode(In);

2239 SlotInfo &SI = FIRangeMap[TFI];

2241 if (!Bad) {

2242

2243 unsigned OpNum = Load ? 0 : 2;

2244 auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF);

2245 RC = getCommonRC(SI.RC, RC);

2246 if (RC == nullptr)

2247 Bad = true;

2248 else

2249 SI.RC = RC;

2250 }

2251 if (!Bad) {

2252

2253 unsigned S = HII.getMemAccessSize(In);

2254 if (SI.Size != 0 && SI.Size != S)

2255 Bad = true;

2256 else

2257 SI.Size = S;

2258 }

2259 if (!Bad) {

2260 for (auto *Mo : In.memoperands()) {

2261 if (!Mo->isVolatile() && !Mo->isAtomic())

2262 continue;

2263 Bad = true;

2264 break;

2265 }

2266 }

2267 if (Bad)

2269 }

2270

2271

2272 for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {

2274 if (Op.isFI())

2275 continue;

2276 int FI = Op.getIndex();

2277

2278

2279 if (i+1 >= n || In.getOperand(i+1).isImm() ||

2280 In.getOperand(i+1).getImm() != 0)

2282 if (BadFIs.count(FI))

2283 continue;

2284

2286 if (Load) {

2287 if (LastStore[FI] == IndexType::None)

2288 LastStore[FI] = IndexType::Entry;

2289 LastLoad[FI] = Index;

2290 } else if (Store) {

2292 if (LastStore[FI] != IndexType::None)

2293 RL.add(LastStore[FI], LastLoad[FI], false, false);

2294 else if (LastLoad[FI] != IndexType::None)

2295 RL.add(IndexType::Entry, LastLoad[FI], false, false);

2296 LastLoad[FI] = IndexType::None;

2297 LastStore[FI] = Index;

2298 } else {

2300 }

2301 }

2302 }

2303

2304 for (auto &I : LastLoad) {

2305 IndexType LL = I.second;

2306 if (LL == IndexType::None)

2307 continue;

2308 auto &RL = FIRangeMap[I.first].Map[&B];

2309 IndexType &LS = LastStore[I.first];

2310 if (LS != IndexType::None)

2311 RL.add(LS, LL, false, false);

2312 else

2313 RL.add(IndexType::Entry, LL, false, false);

2314 LS = IndexType::None;

2315 }

2316 for (auto &I : LastStore) {

2317 IndexType LS = I.second;

2318 if (LS == IndexType::None)

2319 continue;

2320 auto &RL = FIRangeMap[I.first].Map[&B];

2321 RL.add(LS, IndexType::None, false, false);

2322 }

2323 }

2324

2326 for (auto &P : FIRangeMap) {

2327 dbgs() << "fi#" << P.first;

2328 if (BadFIs.count(P.first))

2329 dbgs() << " (bad)";

2330 dbgs() << " RC: ";

2331 if (P.second.RC != nullptr)

2332 dbgs() << HRI.getRegClassName(P.second.RC) << '\n';

2333 else

2334 dbgs() << "\n";

2335 for (auto &R : P.second.Map)

2337 << "}\n";

2338 }

2339 });

2340

2341

2342

2343

2345

2346 std::map<MachineBasicBlock*,std::vector> BlockFIMap;

2347

2348 for (auto &P : FIRangeMap) {

2349

2350 if (BadFIs.count(P.first))

2351 continue;

2352 for (auto &B : MF) {

2353 auto F = P.second.Map.find(&B);

2354

2355 if (F == P.second.Map.end() || F->second.empty())

2356 continue;

2358 if (IR.start() == IndexType::Entry)

2359 LoxFIs.insert(P.first);

2360 BlockFIMap[&B].push_back(P.first);

2361 }

2362 }

2363

2365 dbgs() << "Block-to-FI map (* -- live-on-exit):\n";

2366 for (auto &P : BlockFIMap) {

2367 auto &FIs = P.second;

2368 if (FIs.empty())

2369 continue;

2371 for (auto I : FIs) {

2372 dbgs() << " fi#" << I;

2373 if (LoxFIs.count(I))

2374 dbgs() << '*';

2375 }

2376 dbgs() << " }\n";

2377 }

2378 });

2379

2380#ifndef NDEBUG

2381 bool HasOptLimit = SpillOptMax.getPosition();

2382#endif

2383

2384

2385 for (auto &B : MF) {

2386 auto F = BlockIndexes.find(&B);

2387 assert(F != BlockIndexes.end());

2393

2394 for (auto FI : BlockFIMap[&B]) {

2395 if (BadFIs.count(FI))

2396 continue;

2397 LLVM_DEBUG(dbgs() << "Working on fi#" << FI << '\n');

2399 for (auto &Range : RL) {

2400 LLVM_DEBUG(dbgs() << "--Examining range:" << RL << '\n');

2401 if (!IndexType::isInstr(Range.start()) ||

2402 !IndexType::isInstr(Range.end()))

2403 continue;

2406 assert(SI.mayStore() && "Unexpected start instruction");

2407 assert(EI.mayLoad() && "Unexpected end instruction");

2409

2411 SrcOp.getSubReg() };

2412 auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);

2413

2414 Register FoundR = this->findPhysReg(MF, Range, IM, DM, RC);

2416 << '\n');

2417 if (FoundR == 0)

2418 continue;

2419#ifndef NDEBUG

2420 if (HasOptLimit) {

2422 return;

2424 }

2425#endif

2426

2427

2430 if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {

2432 CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)

2434 }

2435

2436 ++StartIt;

2437

2438 if (LoxFIs.count(FI) && (&Range == &RL.back())) {

2439

2440 if (unsigned SR = SrcOp.getSubReg())

2441 SrcOp.setReg(HRI.getSubReg(FoundR, SR));

2442 else

2443 SrcOp.setReg(FoundR);

2444 SrcOp.setSubReg(0);

2445

2446 SrcOp.setIsKill(false);

2447 } else {

2448 B.erase(&SI);

2450 }

2451

2452 auto EndIt = std::next(EI.getIterator());

2453 for (auto It = StartIt; It != EndIt; It = NextIt) {

2455 NextIt = std::next(It);

2456 int TFI;

2457 if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)

2458 continue;

2459 Register DstR = MI.getOperand(0).getReg();

2460 assert(MI.getOperand(0).getSubReg() == 0);

2462 if (DstR != FoundR) {

2464 unsigned MemSize = HII.getMemAccessSize(MI);

2466 unsigned CopyOpc = TargetOpcode::COPY;

2467 if (HII.isSignExtendingLoad(MI))

2468 CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;

2469 else if (HII.isZeroExtendingLoad(MI))

2470 CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;

2471 CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)

2473 }

2475 B.erase(It);

2476 }

2477

2478

2482 }

2483 }

2484 }

2485}

2486

2487void HexagonFrameLowering::expandAlloca(MachineInstr *AI,

2492

2493

2494

2495

2496

2497

2498

2499

2500

2501

2502

2503

2504

2505

2506

2507

2511

2512

2513 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd)

2516 if (Rs != Rd) {

2517

2518 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP)

2521 }

2522 if (A > 8) {

2523

2524 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd)

2527 if (Rs != Rd)

2528 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP)

2531 }

2532 if (Rs == Rd) {

2533

2534 BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP)

2536 }

2537 if (CF > 0) {

2538

2539 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd)

2542 }

2543}

2544

2548 return false;

2549

2550

2551

2552 return true;

2553}

2554

2557 for (auto &B : MF)

2558 for (auto &I : B)

2559 if (I.getOpcode() == Hexagon::PS_aligna)

2560 return &I;

2561 return nullptr;

2562}

2563

2564

2565

2566void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,

2567 const CSIVect &CSI, bool IsDef, bool IsKill) const {

2568

2569 for (auto &R : CSI)

2571}

2572
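// Decide whether callee-saved registers should be saved/restored with inline
// code instead of the out-of-line stubs. In particular, the stubs can only
// be used when the saved double registers form a contiguous run starting at
// D8; anything else forces inline spills.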

2573

2574

2575

2576

2577bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF,

2578 const CSIVect &CSI) const {

2580 return true;

2582 return true;

2584 return true;

2587 return true;

2588

2589

2590

2591 BitVector Regs(Hexagon::NUM_TARGET_REGS);

2594 if (!Hexagon::DoubleRegsRegClass.contains(R))

2595 return true;

2596 Regs[R] = true;

2597 }

2598 int F = Regs.find_first();

2599 if (F != Hexagon::D8)

2600 return true;

2601 while (F >= 0) {

2602 int N = Regs.find_next(F);

2603 if (N >= 0 && N != F+1)

2604 return true;

2605 F = N;

2606 }

2607

2608 return false;

2609}

2610

2611bool HexagonFrameLowering::useSpillFunction(const MachineFunction &MF,

2612 const CSIVect &CSI) const {

2613 if (shouldInlineCSR(MF, CSI))

2614 return false;

2615 unsigned NumCSI = CSI.size();

2616 if (NumCSI <= 1)

2617 return false;

2618

2621 return Threshold < NumCSI;

2622}

2623

2624bool HexagonFrameLowering::useRestoreFunction(const MachineFunction &MF,

2625 const CSIVect &CSI) const {

2626 if (shouldInlineCSR(MF, CSI))

2627 return false;

2628

2629

2630

2631

2632

2633

2635 return true;

2636 unsigned NumCSI = CSI.size();

2637 if (NumCSI <= 1)

2638 return false;

2639

2642 return Threshold < NumCSI;

2643}

2644

bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
  unsigned StackSize = MF.getFrameInfo().estimateStackSize(MF);
  auto &HST = MF.getSubtarget<HexagonSubtarget>();

  // A fairly conservative guess: a large HVX frame may need extra registers
  // to materialize frame offsets.
  if (HST.useHVXOps() && StackSize > 256)
    return true;

  // Check whether the function has store-immediate instructions that access
  // the stack directly; their offset field is narrow (6 bits, scaled by the
  // access size), so a large frame can push it out of range.
  bool HasImmStack = false;
  unsigned MinLS = ~0u;

  for (const MachineBasicBlock &B : MF) {
    for (const MachineInstr &MI : B) {
      unsigned LS = 0;
      switch (MI.getOpcode()) {
      case Hexagon::S4_storeirit_io:
      case Hexagon::S4_storeirif_io:
      case Hexagon::S4_storeiri_io:
        ++LS;
        [[fallthrough]];
      case Hexagon::S4_storeirht_io:
      case Hexagon::S4_storeirhf_io:
      case Hexagon::S4_storeirh_io:
        ++LS;
        [[fallthrough]];
      case Hexagon::S4_storeirbt_io:
      case Hexagon::S4_storeirbf_io:
      case Hexagon::S4_storeirb_io:
        if (MI.getOperand(0).isFI())
          HasImmStack = true;
        MinLS = std::min(MinLS, LS);
        break;
      }
    }
  }

  if (HasImmStack)
    return !isUInt<6>(StackSize >> MinLS);

  return false;
}

2690

2691namespace {

2692

2693struct HexagonFrameSortingObject {

2694 bool IsValid = false;

2695 unsigned Index = 0;

2696 unsigned Size = 0;

2697 Align ObjectAlignment = Align(1);

2698};

2699

2700struct HexagonFrameSortingComparator {

2701 inline bool operator()(const HexagonFrameSortingObject &A,

2702 const HexagonFrameSortingObject &B) const {

2703 return std::make_tuple(A.IsValid, A.ObjectAlignment, A.Size) <

2704 std::make_tuple(B.IsValid, B.ObjectAlignment, B.Size);

2705 }

2706};

2707}

2708

2709

2710

2713

2714 if (ObjectsToAllocate.empty())

2715 return;

2716

2718 int NObjects = ObjectsToAllocate.size();

2719

2720

2723

2724 for (int i = 0, j = 0, e = MFI.getObjectIndexEnd(); i < e && j != NObjects;

2725 ++i) {

2726 if (i != ObjectsToAllocate[j])

2727 continue;

2728 j++;

2729

2730

2731

2732

2734 if (Size == 0)

2735 return;

2736

2737 SortingObjects[i].IsValid = true;

2738 SortingObjects[i].Index = i;

2739 SortingObjects[i].Size = Size;

2740 SortingObjects[i].ObjectAlignment = MFI.getObjectAlign(i);

2741 }

2742

2743

2744 llvm::stable_sort(SortingObjects, HexagonFrameSortingComparator());

2745

2746

2747 int i = NObjects;

2748 for (auto &Obj : SortingObjects) {

2749 if (i == 0)

2750 break;

2751 ObjectsToAllocate[--i] = Obj.Index;

2752 }

2753}

unsigned const MachineRegisterInfo * MRI

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

This file contains the simple types necessary to represent the attributes associated with functions a...

This file implements the BitVector class.

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static RegisterPass< DebugifyModulePass > DM("debugify", "Attach debug info to everything")

This file defines the DenseMap class.

static MachineInstr * getReturn(MachineBasicBlock &MBB)

Returns the "return" instruction from this block, or nullptr if there isn't any.

static cl::opt< unsigned > ShrinkLimit("shrink-frame-limit", cl::init(std::numeric_limits< unsigned >::max()), cl::Hidden, cl::desc("Max count of stack frame shrink-wraps"))

static bool isOptNone(const MachineFunction &MF)

static cl::opt< int > SpillFuncThreshold("spill-func-threshold", cl::Hidden, cl::desc("Specify O2(not Os) spill func threshold"), cl::init(6))

static std::optional< MachineBasicBlock::iterator > findCFILocation(MachineBasicBlock &B)

static cl::opt< bool > EliminateFramePointer("hexagon-fp-elim", cl::init(true), cl::Hidden, cl::desc("Refrain from using FP whenever possible"))

static bool enableAllocFrameElim(const MachineFunction &MF)

static const char * getSpillFunctionFor(Register MaxReg, SpillKind SpillType, bool Stkchk=false)

static bool hasReturn(const MachineBasicBlock &MBB)

Returns true if MBB contains an instruction that returns.

static cl::opt< bool > EnableSaveRestoreLong("enable-save-restore-long", cl::Hidden, cl::desc("Enable long calls for save-restore stubs."), cl::init(false))

static bool needToReserveScavengingSpillSlots(MachineFunction &MF, const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC)

Returns true if there are no caller-saved registers available in class RC.

static bool isOptSize(const MachineFunction &MF)

static Register getMax32BitSubRegister(Register Reg, const TargetRegisterInfo &TRI, bool hireg=true)

Map a register pair Reg to the subregister that has the greater "number", i.e.

static cl::opt< int > SpillFuncThresholdOs("spill-func-threshold-Os", cl::Hidden, cl::desc("Specify Os spill func threshold"), cl::init(1))

static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR, const HexagonRegisterInfo &HRI)

Checks if the basic block contains any instruction that needs a stack frame to be already in place.

static cl::opt< bool > DisableDeallocRet("disable-hexagon-dealloc-ret", cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"))

static cl::opt< bool > EnableShrinkWrapping("hexagon-shrink-frame", cl::init(true), cl::Hidden, cl::desc("Enable stack frame shrink wrapping"))

static bool hasTailCall(const MachineBasicBlock &MBB)

Returns true if MBB has a machine instructions that indicates a tail call in the block.

static cl::opt< unsigned > NumberScavengerSlots("number-scavenger-slots", cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2))

static Register getMaxCalleeSavedReg(ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo &TRI)

Returns the callee saved register with the largest id in the vector.

static bool isMinSize(const MachineFunction &MF)

static cl::opt< unsigned > SpillOptMax("spill-opt-max", cl::Hidden, cl::init(std::numeric_limits< unsigned >::max()))

static unsigned SpillOptCount

static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI)

static bool isRestoreCall(unsigned Opc)

static cl::opt< bool > OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden, cl::init(true), cl::desc("Optimize spill slots"))

static cl::opt< bool > EnableStackOVFSanitizer("enable-stackovf-sanitizer", cl::Hidden, cl::desc("Enable runtime checks for stack overflow."), cl::init(false))

Legalize the Machine IR a function s Machine IR

This file implements the LivePhysRegs utility for tracking liveness of physical registers.

unsigned const TargetRegisterInfo * TRI

static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)

ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))

#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)

This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.

This file declares the machine register scavenger class.

assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())

bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)

static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)

This file implements a set that has insertion order iteration characteristics.

This file defines the SmallSet class.

This file defines the SmallVector class.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

int find_first() const

find_first - Returns the index of the first set bit, -1 if none of the bits are set.

void resize(unsigned N, bool t=false)

resize - Grow or shrink the bitvector.

int find_next(unsigned Prev) const

find_next - Returns the index of the next set bit following the "Prev" bit.

The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...

This class represents an Operation in the Expression.

NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const

Find nearest common dominator basic block for basic block A and B.

bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const

dominates - Returns true iff A dominates B.

void recalculate(ParentType &Func)

recalculate - compute a dominator tree for the given function

FunctionPass class - This class is used to implement most global optimizations.

bool hasMinSize() const

Optimize this function for minimum size (-Oz).

bool hasOptNone() const

Do not optimize this function (-O0).

bool isVarArg() const

isVarArg - Return true if this function takes a variable number of arguments.

void replaceInstr(MachineInstr *OldMI, MachineInstr *NewMI)

IndexType getIndex(MachineInstr *MI) const

MachineInstr * getInstr(IndexType Idx) const

void add(IndexType Start, IndexType End, bool Fixed, bool TiedEnd)

const MachineInstr * getAlignaInstr(const MachineFunction &MF) const

void insertCFIInstructions(MachineFunction &MF) const

bool hasFPImpl(const MachineFunction &MF) const override

bool enableCalleeSaveSkip(const MachineFunction &MF) const override

Returns true if the target can safely skip saving callee-saved registers for noreturn nounwind functi...

MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override

This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...

StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override

getFrameIndexReference - This method should return the base register and offset used to reference a f...

void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override

Perform most of the PEI work here:

void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override

This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...

void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override

Order the symbols in the local stack frame.

void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS=nullptr) const override

processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...

const SpillSlot * getCalleeSavedSpillSlots(unsigned &NumEntries) const override

getCalleeSavedSpillSlots - This method returns a pointer to an array of pairs, that contains an entry...

bool needsAligna(const MachineFunction &MF) const

bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override

void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override

Store the specified register of the given register class to the specified stack frame index.

void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override

Load the specified register of the given register class from the specified stack frame index.

Hexagon target-specific information for each MachineFunction.

bool isEHReturnCalleeSaveReg(Register Reg) const

const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override

Code Generation virtual methods...

const MCPhysReg * getCallerSavedRegs(const MachineFunction *MF, const TargetRegisterClass *RC) const

const HexagonInstrInfo * getInstrInfo() const override

bool isEnvironmentMusl() const

A set of physical registers with utility functions to track liveness when walking backward/forward th...

static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})

.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.

static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})

.cfi_offset Previous value of Register is saved at offset Offset from CFA.

MCSymbol * createTempSymbol()

Create a temporary symbol with a unique name.

Describe properties that are true of each instruction in the target description file.

MCRegAliasIterator enumerates all registers aliasing Reg.

MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...

int getNumber() const

MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...

iterator getFirstTerminator()

Returns an iterator to the first terminator instruction of this basic block.

DebugLoc findDebugLoc(instr_iterator MBBI)

Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.

iterator getLastNonDebugInstr(bool SkipPseudoOp=true)

Returns an iterator to the last non-debug instruction in the basic block, or end().

void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())

Adds the specified register as a live in.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

instr_iterator erase(instr_iterator I)

Remove an instruction from the instruction list and delete it.

iterator_range< iterator > terminators()

iterator_range< succ_iterator > successors()

bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const

Return true if the specified register is in the live in set.

DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...

bool dominates(const MachineInstr *A, const MachineInstr *B) const

The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.

void setMaxCallFrameSize(uint64_t S)

bool hasVarSizedObjects() const

This method may be called any time after instruction selection is complete to determine if the stack ...

bool isObjectPreAllocated(int ObjectIdx) const

Return true if the object was pre-allocated into the local block.

uint64_t getStackSize() const

Return the number of bytes that must be allocated to hold all of the fixed size frame objects.

bool hasCalls() const

Return true if the current function has any function calls.

Align getMaxAlign() const

Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...

uint64_t getMaxCallFrameSize() const

Return the maximum size of a call frame that must be allocated for an outgoing function call.

int CreateSpillStackObject(uint64_t Size, Align Alignment)

Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...

uint64_t estimateStackSize(const MachineFunction &MF) const

Estimate and return the size of the stack frame.

Align getObjectAlign(int ObjectIdx) const

Return the alignment of the specified stack object.

int64_t getObjectSize(int ObjectIdx) const

Return the size of the specified object.

const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const

Returns a reference to the callee-saved info vector for the current function.

int getObjectIndexEnd() const

Return one past the maximum frame object index.

int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset, bool IsImmutable=false)

Create a spill slot at a fixed location on the stack.

int64_t getObjectOffset(int ObjectIdx) const

Return the assigned stack offset of the specified object from the incoming stack pointer.

void setStackSize(uint64_t Size)

Set the size of the stack.

bool isFixedObjectIndex(int ObjectIdx) const

Returns true if the specified index corresponds to a fixed stack object.
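
A minimal sketch of how these MachineFrameInfo queries combine when sizing a frame (MF and the 8-byte alignment are illustrative assumptions, not the exact Hexagon computation):

MachineFrameInfo &MFI = MF.getFrameInfo();
uint64_t FrameSize = MFI.getStackSize();          // fixed-size objects
if (MFI.hasCalls())
  FrameSize += MFI.getMaxCallFrameSize();         // outgoing-argument area
FrameSize = alignTo(FrameSize, Align(8));
MFI.setStackSize(FrameSize);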

MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...

virtual bool runOnMachineFunction(MachineFunction &MF)=0

runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...

virtual MachineFunctionProperties getRequiredProperties() const

Properties which a MachineFunction may have at a given point in time.

MachineFunctionProperties & set(Property P)

unsigned addFrameInst(const MCCFIInstruction &Inst)

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

StringRef getName() const

getName - Return the name of the corresponding LLVM function.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.
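
A hedged sketch of building a memory operand for a spill store; MF, MFI and FI are assumptions, and the size-based overload is used here instead of the LLT one quoted above:

MachineMemOperand *MMO = MF.getMachineMemOperand(
    MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
    MFI.getObjectSize(FI), MFI.getObjectAlign(FI));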

bool needsFrameMoves() const

True if this function needs frame moves for debug or exceptions.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

MCContext & getContext() const

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

MachineBasicBlock * getBlockNumbered(unsigned N) const

getBlockNumbered - MachineBasicBlocks are automatically numbered when they are inserted into the mach...

Function & getFunction()

Return the LLVM function that this machine code represents.

unsigned getNumBlockIDs() const

getNumBlockIDs - Return the number of MBB ID's allocated.

Ty * getInfo()

getInfo - Keep track of various per-function pieces of information for backends that would like to do...

const MachineBasicBlock & front() const

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with.

const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const

const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const

const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & add(const MachineOperand &MO) const

const MachineInstrBuilder & addFrameIndex(int Idx) const

const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const

Add a new virtual register operand.

const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const

const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const

const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register definition operand.
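
A minimal sketch of chaining these builder helpers to create a store of R28 into frame index FI; the opcode, operand order and MMO are illustrative assumptions, not the exact sequence emitted by this file:

BuildMI(MBB, At, DL, HII.get(Hexagon::S2_storeri_io))
    .addFrameIndex(FI)
    .addImm(0)
    .addReg(Hexagon::R28, getKillRegState(true))
    .addMemOperand(MMO)
    .setMIFlag(MachineInstr::FrameSetup);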

instr_iterator getInstrIterator() const

Representation of each machine instruction.

unsigned getOpcode() const

Returns the opcode of this MachineInstr.

bool isReturn(QueryType Type=AnyInBundle) const

const MachineBasicBlock * getParent() const

void addOperand(MachineFunction &MF, const MachineOperand &Op)

Add the specified operand to the instruction.

void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)

Copy implicit register operands from specified instruction to this instruction.

bool mayLoad(QueryType Type=AnyInBundle) const

Return true if this instruction could possibly read memory.

const DebugLoc & getDebugLoc() const

Returns the debug location id of this MachineInstr.

const MachineOperand & getOperand(unsigned i) const

@ MOStore

The memory access writes data.

MachineOperand class - Representation of each machine instruction operand.

Register getReg() const

getReg - Returns the register number.

static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)

MachinePostDominatorTree - an analysis pass wrapper for DominatorTree used to compute the post-domina...

MachineBasicBlock * findNearestCommonDominator(ArrayRef< MachineBasicBlock * > Blocks) const

Returns the nearest common dominator of the given blocks.
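
A hedged sketch of how this query is typically used when merging several restore points into one (MPT and the contents of RestoreBlocks are assumptions):

SmallVector<MachineBasicBlock *, 4> RestoreBlocks = { /* blocks needing a restore */ };
MachineBasicBlock *RestoreB = MPT.findNearestCommonDominator(RestoreBlocks);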

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...

static PassRegistry * getPassRegistry()

getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...

void addScavengingFrameIndex(int FI)

Add a scavenging frame index.
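
In frame lowering this is usually paired with CreateSpillStackObject to reserve emergency slots for the scavenger; a minimal sketch (RS, MFI, HRI and the slot count are assumptions):

const TargetRegisterClass &RC = Hexagon::IntRegsRegClass;
for (unsigned I = 0; I != NumScavengerSlots; ++I) {
  int FI = MFI.CreateSpillStackObject(HRI.getSpillSize(RC),
                                      HRI.getSpillAlign(RC));
  RS->addScavengingFrameIndex(FI);
}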

Wrapper class representing virtual and physical registers.

constexpr bool isValid() const

constexpr bool isPhysical() const

Return true if the specified register number is in the physical register namespace.

A vector that has set insertion semantics.

size_type size() const

Determine the number of elements in the SetVector.

bool insert(const value_type &X)

Insert a new element into the SetVector.

SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...

size_type count(const T &V) const

count - Return 1 if the element is in the set, 0 otherwise.

std::pair< const_iterator, bool > insert(const T &V)

insert - Insert an element into the set if it isn't already there.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

StackOffset holds a fixed and a scalable offset in bytes.

int64_t getFixed() const

Returns the fixed component of the stack.

static StackOffset getFixed(int64_t Fixed)

bool hasFP(const MachineFunction &MF) const

hasFP - Return true if the specified function should have a dedicated frame pointer register.

virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const

This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...

Align getStackAlign() const

getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
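
The common shape of a determineCalleeSaves override, shown as a hedged sketch (MyFrameLowering and the forced register are illustrative, not the Hexagon implementation):

void MyFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedRegs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  if (hasFP(MF))
    SavedRegs.set(Hexagon::R30);   // also save the frame pointer
}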

Primary interface to the complete machine description for the target machine.

CodeGenOptLevel getOptLevel() const

Returns the optimization level: None, Less, Default, or Aggressive.

bool isPositionIndependent() const

unsigned getID() const

Return the register class ID number.

bool hasSubClassEq(const TargetRegisterClass *RC) const

Returns true if RC is a sub-class of or equal to this class.

ArrayRef< MCPhysReg > getRawAllocationOrder(const MachineFunction &MF) const

Returns the preferred order for allocating registers from this register class in MF.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

self_iterator getIterator()

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

@ C

The default llvm calling convention, compatible with C.

unsigned ID

LLVM IR allows arbitrary numbers to be used as calling convention identifiers.

@ Kill

The last use of a register.

Reg

All possible values of the reg field in the ModR/M byte.

initializer< Ty > init(const Ty &Val)

This is an optimization pass for GlobalISel generic memory operations.

void stable_sort(R &&Range)

MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)

Builder interface. Specify how to create the initial instruction itself.

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

FunctionPass * createHexagonCallFrameInformation()

void initializeHexagonCallFrameInformationPass(PassRegistry &)

unsigned getKillRegState(bool B)

uint64_t alignTo(uint64_t Size, Align A)

Returns a multiple of A needed to store Size bytes.
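
For example, alignTo(13, Align(8)) returns 16, while alignTo(16, Align(8)) returns 16 unchanged.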

auto find_if(R &&Range, UnaryPredicate P)

Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
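
A minimal sketch of the range form, locating the first return instruction in a block (MBB is an assumption):

auto RetI = llvm::find_if(
    MBB, [](const MachineInstr &MI) { return MI.isReturn(); });
bool HasReturn = RetI != MBB.end();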

Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)

Prints virtual and physical registers with or without a TRI instance.

Printable printMBBReference(const MachineBasicBlock &MBB)

Prints a machine basic block reference.
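
These two print helpers combine into the usual debug-output idiom (a sketch; Reg, HRI and MBB are assumptions):

LLVM_DEBUG(dbgs() << "spilling " << printReg(Reg, &HRI) << " in "
                  << printMBBReference(MBB) << '\n');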

This struct is a compact representation of a valid (non-zero power of two) alignment.

uint64_t value() const

This is a hole in the type system and should not be abused.

static RegisterSet expandToSubRegs(RegisterRef R, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI)

std::map< RegisterRef, RangeList > RegToRangeMap

static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)

Stack pointer relative access.