LLVM: lib/CodeGen/InlineSpiller.cpp Source File

1//===- InlineSpiller.cpp - Insert spills and restores inline -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The inline spiller modifies the machine function directly instead of
10// inserting spills and restores in VirtRegMap. This is very much work in
11// progress :)
12//
13//===----------------------------------------------------------------------===//

46#include "llvm/Config/llvm-config.h"

54#include

55#include

56#include

57#include

58

59using namespace llvm;

60

61#define DEBUG_TYPE "regalloc"

62

63STATISTIC(NumSpilledRanges, "Number of spilled live ranges");

64STATISTIC(NumSnippets, "Number of spilled snippets");

65STATISTIC(NumSpills, "Number of spills inserted");

66STATISTIC(NumSpillsRemoved, "Number of spills removed");

67STATISTIC(NumReloads, "Number of reloads inserted");

68STATISTIC(NumReloadsRemoved, "Number of reloads removed");

69STATISTIC(NumFolded, "Number of folded stack accesses");

70STATISTIC(NumFoldedLoads, "Number of folded loads");

71STATISTIC(NumRemats, "Number of rematerialized defs for spilling");

72

73static cl::opt<bool>
74 RestrictStatepointRemat("restrict-statepoint-remat", cl::init(false),
75 cl::Hidden,
76 cl::desc("Restrict remat for statepoint operands"));

77

78namespace {

89

91

92

93

94

95

97

98

99

100

101 using MergeableSpillsMap =

103 MergeableSpillsMap MergeableSpills;

104

105

106

107

109

112

113 void rmRedundantSpills(

117

118 void getVisitOrders(

124

129

130public:

133 : MF(mf), LIS(Analyses.LIS), LSS(Analyses.LSS), MDT(Analyses.MDT),

134 VRM(vrm), MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),

135 TRI(*mf.getSubtarget().getRegisterInfo()), MBFI(Analyses.MBFI),

136 IPA(LIS, mf.getNumBlockIDs()) {}

137

138 void addToMergeableSpills(MachineInstr &Spill, int StackSlot,

140 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);

141 void hoistAllSpills();

143};

144

145class InlineSpiller : public Spiller {

146 MachineFunction &MF;

147 LiveIntervals &LIS;

148 LiveStacks &LSS;

149 VirtRegMap &VRM;

150 MachineRegisterInfo &MRI;

151 const TargetInstrInfo &TII;

152 const TargetRegisterInfo &TRI;

153 LiveRegMatrix *Matrix = nullptr;

154

155

156 LiveRangeEdit *Edit = nullptr;

157 LiveInterval *StackInt = nullptr;

158 int StackSlot;

160 AllocationOrder *Order = nullptr;

161

162

164

165

166

168

169

170

171

172 SmallPtrSet<MachineInstr*, 8> SnippetCopies;

173

174

175 SmallPtrSet<VNInfo*, 8> UsedValues;

176

177

178 SmallVector<MachineInstr*, 8> DeadDefs;

179

180

181 HoistSpillHelper HSpiller;

182

183

184 VirtRegAuxInfo &VRAI;

185

186 ~InlineSpiller() override = default;

187

188public:

189 InlineSpiller(const Spiller::RequiredAnalyses &Analyses, MachineFunction &MF,

190 VirtRegMap &VRM, VirtRegAuxInfo &VRAI, LiveRegMatrix *Matrix)

191 : MF(MF), LIS(Analyses.LIS), LSS(Analyses.LSS), VRM(VRM),

192 MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),

193 TRI(*MF.getSubtarget().getRegisterInfo()), Matrix(Matrix),

194 HSpiller(Analyses, MF, VRM), VRAI(VRAI) {}

195

196 void spill(LiveRangeEdit &, AllocationOrder *Order = nullptr) override;

198 ArrayRef getReplacedRegs() override { return RegsReplaced; }

199 void postOptimization() override;

200

201private:

202 bool isSnippet(const LiveInterval &SnipLI);

203 void collectRegsToSpill();

204

206

208 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);

209 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

210

211 void markValueUsed(LiveInterval*, VNInfo*);

212 bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);

213 bool hasPhysRegAvailable(const MachineInstr &MI);

214 bool reMaterializeFor(LiveInterval &, MachineInstr &MI);

215 void reMaterializeAll();

216

217 bool coalesceStackAccess(MachineInstr *MI, Register Reg);

218 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,

219 MachineInstr *LoadMI = nullptr);

222

224 void spillAll();

225};

226

227}

228

230

231void Spiller::anchor() {}

232

233Spiller *llvm::createInlineSpiller(const Spiller::RequiredAnalyses &Analyses,
234 MachineFunction &MF, VirtRegMap &VRM,
235 VirtRegAuxInfo &VRAI,
236 LiveRegMatrix *Matrix) {
237 return new InlineSpiller(Analyses, MF, VRM, VRAI, Matrix);

238}

239

240

241

242

243

244

245

246

247

248

249

250

251

252

253

256 if (TII.isCopyInstr(MI))

258

261

262

263 if (DstOp.getSubReg() != SrcOp.getSubReg())

270}

271

272

277

279 "expected to see first instruction in bundle");

280

283 while (I->isBundledWithSucc()) {

285 auto CopyInst = TII.isCopyInstr(MI);

286 if (!CopyInst)

288

292 if (!SnipReg)

297 if (!SnipReg)

301 }

302

303 ++I;

304 }

305

307}

308

311 if (MO.getReg().isVirtual())

313}

314

315

316

317

318bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {

320

321

322

323

324

325

326

327

328

329

330

331

332 if (!LIS.intervalIsInOneMBB(SnipLI))

333 return false;

334

335

336

338 for (auto *VNI : SnipLI.vnis()) {

339 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);

340 if (MI->getOpcode() == TargetOpcode::STATEPOINT)

341 --NumValNums;

342 }

343 if (NumValNums > 2)

344 return false;

345

347

348

350 RI = MRI.reg_bundle_nodbg_begin(SnipLI.reg()),

351 E = MRI.reg_bundle_nodbg_end();

352 RI != E;) {

354

355

357 continue;

358

359

360 int FI;

362 continue;

363

364

366 continue;

367

369 continue;

370

371

373 return false;

375 }

376 return true;

377}

378

379

380

381void InlineSpiller::collectRegsToSpill() {

383

384

385 RegsToSpill.assign(1, Reg);

386 SnippetCopies.clear();

387 RegsReplaced.clear();

388

389

390

391 if (Original == Reg)

392 return;

393

396 if (!isSibling(SnipReg))

397 continue;

398 LiveInterval &SnipLI = LIS.getInterval(SnipReg);

399 if (!isSnippet(SnipLI))

400 continue;

401 SnippetCopies.insert(&MI);

402 if (isRegToSpill(SnipReg))

403 continue;

404 RegsToSpill.push_back(SnipReg);

405 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');

406 ++NumSnippets;

407 }

408}

409

410bool InlineSpiller::isSibling(Register Reg) {

411 return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;

412}

413

414

415

416

417

418

419

420

421

422

423

424

425

426

427

428

429

430

431

432

433bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,

435 SlotIndex Idx = LIS.getInstructionIndex(CopyMI);

436#ifndef NDEBUG

439#endif

440

442 LiveInterval &SrcLI = LIS.getInterval(SrcReg);

447 return false;

448

449

450

451

452 assert(StackInt && "No stack slot assigned yet.");

453 LiveInterval &OrigLI = LIS.getInterval(Original);

455 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));

456 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "

457 << *StackInt << '\n');

458

459

460

461 eliminateRedundantSpills(SrcLI, SrcVNI);

462

467 else {

469 assert(DefMI && "Defining instruction disappeared");

471 ++MII;

472 }

474

477 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);

480 --MII;

481 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);

482

483

484

485

486 if (MIS.begin() == MII)

487 HSpiller.addToMergeableSpills(*MII, StackSlot, Original);

488 ++NumSpills;

489 return true;

490}

491

492

493

494void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {

495 assert(VNI && "Missing value");

497 WorkList.push_back(std::make_pair(&SLI, VNI));

498 assert(StackInt && "No stack slot assigned yet.");

499

500 do {

504 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'

505 << VNI->def << " in " << *LI << '\n');

506

507

508 if (isRegToSpill(Reg))

509 continue;

510

511

512 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));

513 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

514

515

519 continue;

520 SlotIndex Idx = LIS.getInstructionIndex(MI);

522 continue;

523

524

526 if (isSibling(DstReg)) {

527 LiveInterval &DstLI = LIS.getInterval(DstReg);

529 assert(DstVNI && "Missing defined value");

531

532 WorkList.push_back(std::make_pair(&DstLI, DstVNI));

533 }

534 continue;

535 }

536

537

538 int FI;

540 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);

541

542 MI.setDesc(TII.get(TargetOpcode::KILL));

543 DeadDefs.push_back(&MI);

544 ++NumSpillsRemoved;

545 if (HSpiller.rmFromMergeableSpills(MI, StackSlot))

546 --NumSpills;

547 }

548 }

549 } while (!WorkList.empty());

550}

551

552

553

554

555

556

557

560 WorkList.push_back(std::make_pair(LI, VNI));

561 do {

563 if (!UsedValues.insert(VNI).second)

564 continue;

565

570 if (PVNI)

571 WorkList.push_back(std::make_pair(LI, PVNI));

572 }

573 continue;

574 }

575

576

578 if (!SnippetCopies.count(MI))

579 continue;

580 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());

581 assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");

583 assert(SnipVNI && "Snippet undefined before copy");

584 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));

585 } while (!WorkList.empty());

586}

587

588bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,

591 return true;

592

593

594

595

596

597

598

599

600

601

602

603

604

605

606

607 if (MI.getOpcode() != TargetOpcode::STATEPOINT)

608 return true;

609

610

611

613 EndIdx = MI.getNumOperands();

614 Idx < EndIdx; ++Idx) {

617 return false;

618 }

619 return true;

620}

621

622

623

624bool InlineSpiller::hasPhysRegAvailable(const MachineInstr &MI) {

625 if (!Order || Matrix)

626 return false;

627

630

631 for (MCPhysReg PhysReg : *Order) {

632 if (Matrix->checkInterference(PrevIdx, UseIdx, PhysReg))

633 return true;

634 }

635

636 return false;

637}

638

639

641

644

645

646

649 return false;

650 }

651

654

655 if (!ParentVNI) {

658 if (MO.getReg() == VirtReg.reg())

661 return true;

662 }

663

664

665

666 if (SnippetCopies.count(&MI)) {

667 LLVM_DEBUG(dbgs() << "\tskipping remat snippet copy for " << UseIdx << '\t'

668 << MI);

669 return false;

670 }

671

672 LiveInterval &OrigLI = LIS.getInterval(Original);

674 assert(OrigVNI && "corrupted sub-interval");

676

677

678

679

681 markValueUsed(&VirtReg, ParentVNI);

682 LLVM_DEBUG(dbgs() << "\tcannot remat missing def for " << UseIdx << '\t'

683 << MI);

684 return false;

685 }

686

689 if (!Edit->canRematerializeAt(RM, UseIdx)) {

690 markValueUsed(&VirtReg, ParentVNI);

691 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);

692 return false;

693 }

694

695

696

697 if (RI.Tied) {

698 markValueUsed(&VirtReg, ParentVNI);

699 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);

700 return false;

701 }

702

703

704

705 if (RM.OrigMI->canFoldAsLoad() &&

706 (RM.OrigMI->mayLoad() || !hasPhysRegAvailable(MI)) &&

707 foldMemoryOperand(Ops, RM.OrigMI)) {

708 Edit->markRematerialized(RM.ParentVNI);

709 ++NumFoldedLoads;

710 return true;

711 }

712

713

714

715 if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {

716 markValueUsed(&VirtReg, ParentVNI);

717 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);

718 return false;

719 }

720

721

722 Register NewVReg = Edit->createFrom(Original);

723

724

725 MRI.constrainRegClass(NewVReg, MRI.getRegClass(VirtReg.reg()));

726

727

729 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);

730

731

732

733 auto *NewMI = LIS.getInstructionFromIndex(DefIdx);

734 NewMI->setDebugLoc(MI.getDebugLoc());

735

736 (void)DefIdx;

738 << *LIS.getInstructionFromIndex(DefIdx));

739

740

741 for (const auto &OpPair : Ops) {

742 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);

746 }

747 }

749

750 ++NumRemats;

751 return true;

752}

753

754

755

756void InlineSpiller::reMaterializeAll() {

757 UsedValues.clear();

758

759

760 bool anyRemat = false;

764

765 if (MI.isDebugValue())

766 continue;

767

768 assert(MI.isDebugInstr() && "Did not expect to find a use in debug "

769 "instruction that isn't a DBG_VALUE");

770

771 anyRemat |= reMaterializeFor(LI, MI);

772 }

773 }

774 if (!anyRemat)

775 return;

776

777

781 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))

782 continue;

784 MI->addRegisterDead(Reg, &TRI);

785 if (MI->allDefsAreDead())

786 continue;

788 DeadDefs.push_back(MI);

789

790

791

792 if (MI->isBundledWithSucc() && MI->isBundledWithPred()) {

794 EndIt = MI->getParent()->instr_end();

795 ++BeginIt;

796

797 bool OnlyDeadCopies = true;

799 It != EndIt && It->isBundledWithPred(); ++It) {

800

802 bool IsCopyToDeadReg =

803 DestSrc && DestSrc->Destination->getReg() == Reg;

804 if (!IsCopyToDeadReg) {

805 OnlyDeadCopies = false;

806 break;

807 }

808 }

809 if (OnlyDeadCopies) {

811 It != EndIt && It->isBundledWithPred(); ++It) {

812 It->addRegisterDead(Reg, &TRI);

814 DeadDefs.push_back(&*It);

815 }

816 }

817 }

818 }

819 }

820

821

822

823 if (DeadDefs.empty())

824 return;

825 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");

826 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

827

828

829

830

831

832

833

834 unsigned ResultPos = 0;

836 if (MRI.reg_nodbg_empty(Reg)) {

837 Edit->eraseVirtReg(Reg);

838 RegsReplaced.push_back(Reg);

839 continue;

840 }

841

843 (!LIS.getInterval(Reg).empty() || MRI.reg_nodbg_empty(Reg)) &&

844 "Empty and not used live-range?!");

845

846 RegsToSpill[ResultPos++] = Reg;

847 }

848 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());

850 << " registers to spill after remat.\n");

851}

852

853

854

855

856

857

859 int FI = 0;

861 bool IsLoad = InstrReg.isValid();

862 if (!IsLoad)

864

865

866 if (InstrReg != Reg || FI != StackSlot)

867 return false;

868

869 if (!IsLoad)

870 HSpiller.rmFromMergeableSpills(*MI, StackSlot);

871

873 LIS.RemoveMachineInstrFromMaps(*MI);

874 MI->eraseFromParent();

875

876 if (IsLoad) {

877 ++NumReloadsRemoved;

878 --NumReloads;

879 } else {

880 ++NumSpillsRemoved;

881 --NumSpills;

882 }

883

884 return true;

885}

886

887#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

889

893 const char *const header,

895 char NextLine = '\n';

896 char SlotIndent = '\t';

897

898 if (std::next(B) == E) {

899 NextLine = ' ';

900 SlotIndent = ' ';

901 }

902

903 dbgs() << '\t' << header << ": " << NextLine;

904

907

908

909

910

911 if (VReg) {

912 MachineOperand *MO = I->findRegisterDefOperand(VReg, nullptr);

915 }

916

917 dbgs() << SlotIndent << Idx << '\t' << *I;

918 }

919}

920#endif

921

922

923

924

925

926

927

928bool InlineSpiller::

929foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,

931 if (Ops.empty())

932 return false;

933

935 if (Ops.back().first != MI || MI->isBundled())

936 return false;

937

940

941

942

943

944

945

946

947 bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;

948

949

950

952 MI->getOpcode() == TargetOpcode::STATEPOINT ||

953 MI->getOpcode() == TargetOpcode::PATCHPOINT ||

954 MI->getOpcode() == TargetOpcode::STACKMAP;

955

956

957

959 for (const auto &OpPair : Ops) {

960 unsigned Idx = OpPair.second;

961 assert(MI == OpPair.first && "Instruction conflict during operand folding");

963

964

965

966

968 continue;

969

971 ImpReg = MO.getReg();

972 continue;

973 }

974

975 if (!SpillSubRegs && MO.getSubReg())

976 return false;

977

978 if (LoadMI && MO.isDef())

979 return false;

980

981 if (UntieRegs || MI->isRegTiedToDefOperand(Idx))

983 }

984

985

986

987 if (FoldOps.empty())

988 return false;

989

991

993 if (UntieRegs)

994 for (unsigned Idx : FoldOps) {

997 continue;

998 unsigned Tied = MI->findTiedOperandIdx(Idx);

1001 else {

1002 assert(MO.isDef() && "Tied to not use and def?");

1004 }

1005 MI->untieRegOperand(Idx);

1006 }

1007

1011 if (!FoldMI) {

1012

1013 for (auto Tied : TiedOps)

1014 MI->tieOperands(Tied.first, Tied.second);

1015 return false;

1016 }

1017

1018

1020 if (!MO->isReg())

1021 continue;

1024 continue;

1025 }

1026

1027 if (MO->isUse())

1028 continue;

1031 continue;

1032

1033 assert(MO->isDead() && "Cannot fold physreg def");

1036 }

1037

1038 int FI;

1040 HSpiller.rmFromMergeableSpills(*MI, FI))

1041 --NumSpills;

1043

1044 if (MI->isCandidateForAdditionalCallInfo())

1045 MI->getMF()->moveAdditionalCallInfo(MI, FoldMI);

1046

1047

1048

1049

1050

1051

1052 if (MI->peekDebugInstrNum() && Ops[0].second == 0) {

1053

1054 auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {

1055

1056 unsigned OldOperandNum = Ops[0].second;

1058 unsigned OldNum = MI->getDebugInstrNum();

1059 MF.makeDebugValueSubstitution({OldNum, OldOperandNum},

1061 };

1062

1064 if (Ops.size() == 1 && Op0.isDef()) {

1065 MakeSubstitution();

1066 } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&

1067 Op0.getReg() == MI->getOperand(1).getReg()) {

1068 MakeSubstitution();

1069 }

1070 } else if (MI->peekDebugInstrNum()) {

1071

1072

1073

1074

1075

1076 MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);

1077 }

1078

1079 MI->eraseFromParent();

1080

1081

1082 assert(!MIS.empty() && "Unexpected empty span of instructions!");

1084 if (&MI != FoldMI)

1086

1087

1088

1089 if (ImpReg)

1090 for (unsigned i = FoldMI->getNumOperands(); i; --i) {

1093 break;

1094 if (MO.getReg() == ImpReg)

1096 }

1097

1099 "folded"));

1100

1101 if (!WasCopy)

1102 ++NumFolded;

1103 else if (Ops.front().second == 0) {

1104 ++NumSpills;

1105

1106

1107

1108 if (std::distance(MIS.begin(), MIS.end()) <= 1)

1109 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);

1110 } else

1111 ++NumReloads;

1112 return true;

1113}

1114

1115void InlineSpiller::insertReload(Register NewVReg,

1119

1123

1125

1127 NewVReg));

1128 ++NumReloads;

1129}

1130

1131

1132

1133

1135 if (!Def.isImplicitDef())

1136 return true;

1137

1138

1139

1140

1141 return Def.getOperand(0).getSubReg();

1142}

1143

1144

1145void InlineSpiller::insertSpill(Register NewVReg, bool isKill,

1147

1148

1149 assert(MI->isTerminator() && "Inserting a spill after a terminator");

1151

1155

1156 if (IsRealSpill)

1159 else

1160

1161

1162

1163

1164 BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))

1166

1171

1174 ++NumSpills;

1175

1176

1177

1178 if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)

1179 HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);

1180}

1181

1182

1183void InlineSpiller::spillAroundUses(Register Reg) {

1186

1187

1189

1190 if (MI.isDebugValue()) {

1191

1193 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);

1196 continue;

1197 }

1198

1199 assert(MI.isDebugInstr() && "Did not expect to find a use in debug "

1200 "instruction that isn't a DBG_VALUE");

1201

1202

1203 if (SnippetCopies.count(&MI))

1204 continue;

1205

1206

1207 if (coalesceStackAccess(&MI, Reg))

1208 continue;

1209

1210

1213

1214

1215

1219 Idx = VNI->def;

1220

1221

1223 if (SibReg && isSibling(SibReg)) {

1224

1225 if (isRegToSpill(SibReg)) {

1227 SnippetCopies.insert(&MI);

1228 continue;

1229 }

1231 if (hoistSpillInsideBB(OldLI, MI)) {

1232

1233 MI.getOperand(0).setIsDead();

1234 DeadDefs.push_back(&MI);

1235 continue;

1236 }

1237 } else {

1238

1240 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));

1241

1242 }

1243 }

1244

1245

1246 if (foldMemoryOperand(Ops))

1247 continue;

1248

1249

1250

1251 Register NewVReg = Edit->createFrom(Reg);

1252

1254 insertReload(NewVReg, Idx, &MI);

1255

1256

1257 bool hasLiveDef = false;

1258 for (const auto &OpPair : Ops) {

1259 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);

1261 if (MO.isUse()) {

1262 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))

1264 } else {

1266 hasLiveDef = true;

1267 }

1268 }

1269 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');

1270

1271

1273 if (hasLiveDef)

1274 insertSpill(NewVReg, true, &MI);

1275 }

1276}

1277

1278

1279void InlineSpiller::spillAll() {

1280

1282 StackSlot = VRM.assignVirt2StackSlot(Original);

1283 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));

1284 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());

1285 } else

1286 StackInt = &LSS.getInterval(StackSlot);

1287

1288 if (Original != Edit->getReg())

1289 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

1290

1291 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");

1293 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),

1295 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

1296

1297

1299 spillAroundUses(Reg);

1300

1301

1303 VRM.assignVirt2StackSlot(Reg, StackSlot);

1304 }

1305

1306

1307 if (!DeadDefs.empty()) {

1308 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");

1309 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

1310 }

1311

1312

1316 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");

1317

1319 MI.eraseFromBundle();

1320 }

1321 }

1322

1323

1325 Edit->eraseVirtReg(Reg);

1326}

1327

1329 ++NumSpilledRanges;

1330 Edit = &edit;

1331 Order = order;

1333

1334 Original = VRM.getOriginal(edit.getReg());

1335 StackSlot = VRM.getStackSlot(Original);

1336 StackInt = nullptr;

1337

1339 << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))

1340 << ':' << edit.getParent() << "\nFrom original "

1341 << printReg(Original) << '\n');

1343 "Attempting to spill already spilled value.");

1344 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

1345

1346 collectRegsToSpill();

1347 reMaterializeAll();

1348

1349

1350 if (!RegsToSpill.empty())

1351 spillAll();

1352

1353 Edit->calculateRegClassAndHint(MF, VRAI);

1354}

1355

1356

1357void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }

1358

1359

1360void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,

1364

1365

1366

1367 auto [Place, Inserted] = StackSlotToOrigLI.try_emplace(StackSlot);

1368 if (Inserted) {

1369 auto LI = std::make_unique(OrigLI.reg(), OrigLI.weight());

1371 Place->second = std::move(LI);

1372 }

1373

1375 VNInfo *OrigVNI = Place->second->getVNInfoAt(Idx.getRegSlot());

1376 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);

1377 MergeableSpills[MIdx].insert(&Spill);

1378}

1379

1380

1381

1382bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,

1383 int StackSlot) {

1384 auto It = StackSlotToOrigLI.find(StackSlot);

1385 if (It == StackSlotToOrigLI.end())

1386 return false;

1388 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());

1389 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);

1390 return MergeableSpills[MIdx].erase(&Spill);

1391}

1392

1393

1394

1395bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,

1397 SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);

1398

1399

1400 if (Idx < OrigVNI.def) {

1401

1402

1403 LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");

1404 return false;

1405 }

1408 assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

1409

1410 for (const Register &SibReg : Siblings) {

1413 if (VNI) {

1414 LiveReg = SibReg;

1415 return true;

1416 }

1417 }

1418 return false;

1419}

1420

1421

1422

1423void HoistSpillHelper::rmRedundantSpills(

1427

1428

1429

1430 for (auto *const CurrentSpill : Spills) {

1434 if (PrevSpill) {

1437 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;

1438 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;

1439 SpillsToRm.push_back(SpillToRm);

1440 SpillBBToSpill[MDT.getNode(Block)] = SpillToKeep;

1441 } else {

1442 SpillBBToSpill[MDT.getNode(Block)] = CurrentSpill;

1443 }

1444 }

1445 for (auto *const SpillToRm : SpillsToRm)

1446 Spills.erase(SpillToRm);

1447}

1448

1449

1450

1451

1452

1453

1454

1455void HoistSpillHelper::getVisitOrders(

1461

1462

1464

1465

1467

1468

1469

1470

1472

1473

1474

1475

1476

1477

1478

1479 for (auto *const Spill : Spills) {

1483 while (Node != RootIDomNode) {

1484

1485

1486 if (Node != MDT[Block] && SpillBBToSpill[Node]) {

1487 SpillToRm = SpillBBToSpill[MDT[Block]];

1488 break;

1489

1490

1491

1492 } else if (WorkSet.count(Node)) {

1493 break;

1494 } else {

1495 NodesOnPath.insert(Node);

1496 }

1498 }

1499 if (SpillToRm) {

1500 SpillsToRm.push_back(SpillToRm);

1501 } else {

1502

1503

1504

1505

1506

1509 }

1510 NodesOnPath.clear();

1511 }

1512

1513

1514

1515 unsigned idx = 0;

1516 Orders.push_back(MDT.getNode(Root));

1517 do {

1520 if (WorkSet.count(Child))

1522 }

1523 } while (idx != Orders.size());

1525 "Orders have different size with WorkSet");

1526

1527#ifndef NDEBUG

1530 for (; RIt != Orders.rend(); RIt++)

1531 LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");

1533#endif

1534}

1535

1536

1537

1538

1539void HoistSpillHelper::runHoistSpills(

1544

1546

1547

1548

1549

1550

1552

1554

1555 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

1556

1558 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,

1559 SpillBBToSpill);

1560

1561

1562

1563

1564

1565 using NodesCostPair =

1566 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;

1568

1569

1570

1571

1572

1574 for (; RIt != Orders.rend(); RIt++) {

1576

1577

1578 if (auto It = SpillsToKeep.find(*RIt);

1579 It != SpillsToKeep.end() && !It->second) {

1580 auto &SIt = SpillsInSubTreeMap[*RIt];

1581 SIt.first.insert(*RIt);

1582

1583 SIt.second = MBFI.getBlockFreq(Block);

1584 continue;

1585 }

1586

1587

1588

1590 if (!SpillsInSubTreeMap.contains(Child))

1591 continue;

1592

1593

1594

1595

1596

1597

1598 auto &[SpillsInSubTree, SubTreeCost] = SpillsInSubTreeMap[*RIt];

1599 auto ChildIt = SpillsInSubTreeMap.find(Child);

1600 SubTreeCost += ChildIt->second.second;

1601 auto BI = ChildIt->second.first.begin();

1602 auto EI = ChildIt->second.first.end();

1603 SpillsInSubTree.insert(BI, EI);

1604 SpillsInSubTreeMap.erase(ChildIt);

1605 }

1606

1607 auto &[SpillsInSubTree, SubTreeCost] = SpillsInSubTreeMap[*RIt];

1608

1609 if (SpillsInSubTree.empty())

1610 continue;

1611

1612

1614 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))

1615 continue;

1616

1617

1618

1622 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {

1623

1624 for (auto *const SpillBB : SpillsInSubTree) {

1625

1626

1627 if (auto It = SpillsToKeep.find(SpillBB);

1628 It != SpillsToKeep.end() && !It->second) {

1629 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];

1630 SpillsToRm.push_back(SpillToRm);

1631 }

1632

1633 SpillsToKeep.erase(SpillBB);

1634 }

1635

1636

1637 SpillsToKeep[*RIt] = LiveReg;

1639 dbgs() << "spills in BB: ";

1640 for (const auto Rspill : SpillsInSubTree)

1641 dbgs() << Rspill->getBlock()->getNumber() << " ";

1642 dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()

1643 << "\n";

1644 });

1645 SpillsInSubTree.clear();

1646 SpillsInSubTree.insert(*RIt);

1647 SubTreeCost = MBFI.getBlockFreq(Block);

1648 }

1649 }

1650

1651

1652 for (const auto &Ent : SpillsToKeep) {

1653 if (Ent.second)

1654 SpillsToIns[Ent.first->getBlock()] = Ent.second;

1655 }

1656}

1657

1658

1659

1660

1661

1662

1663

1664

1665

1666

1667

1668

1669

1670

1671

1672

1673

1674void HoistSpillHelper::hoistAllSpills() {

1676 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

1677

1678 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {

1680 Register Original = VRM.getPreSplitReg(Reg);

1682 Virt2SiblingsMap[Original].insert(Reg);

1683 }

1684

1685

1686 for (auto &Ent : MergeableSpills) {

1687 int Slot = Ent.first.first;

1689 VNInfo *OrigVNI = Ent.first.second;

1691 if (Ent.second.empty())

1692 continue;

1693

1695 dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"

1696 << "Equal spills in BB: ";

1697 for (const auto spill : EqValSpills)

1698 dbgs() << spill->getParent()->getNumber() << " ";

1699 dbgs() << "\n";

1700 });

1701

1702

1704

1706

1707 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

1708

1710 dbgs() << "Finally inserted spills in BB: ";

1711 for (const auto &Ispill : SpillsToIns)

1712 dbgs() << Ispill.first->getNumber() << " ";

1713 dbgs() << "\nFinally removed spills in BB: ";

1714 for (const auto Rspill : SpillsToRm)

1715 dbgs() << Rspill->getParent()->getNumber() << " ";

1716 dbgs() << "\n";

1717 });

1718

1719

1720 LiveInterval &StackIntvl = LSS.getInterval(Slot);

1721 if (!SpillsToIns.empty() || !SpillsToRm.empty())

1723 StackIntvl.getValNumInfo(0));

1724

1725

1726 for (auto const &Insert : SpillsToIns) {

1736 ++NumSpills;

1737 }

1738

1739

1740 NumSpills -= SpillsToRm.size();

1741 for (auto *const RMEnt : SpillsToRm) {

1742 RMEnt->setDesc(TII.get(TargetOpcode::KILL));

1743 for (unsigned i = RMEnt->getNumOperands(); i; --i) {

1746 RMEnt->removeOperand(i - 1);

1747 }

1748 }

1749 Edit.eliminateDeadDefs(SpillsToRm, {});

1750 }

1751}

1752

1753

1754

1755void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {

1756 if (VRM.hasPhys(Old))

1757 VRM.assignVirt2Phys(New, VRM.getPhys(Old));

1759 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));

1760 else

1761 llvm_unreachable("VReg should be assigned either physreg or stackslot");

1762 if (VRM.hasShape(Old))

1763 VRM.assignVirt2Shape(New, VRM.getShape(Old));

1764}
