LLVM: lib/CodeGen/MachineSink.cpp Source File

//===- MachineSink.cpp - Sinking for machine instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//

58#include

59#include

60#include

61#include

62

63using namespace llvm;

64

65#define DEBUG_TYPE "machine-sink"

66

static cl::opt<bool>
    SplitEdges("machine-sink-split",
               cl::desc("Split critical edges during machine sinking"),
               cl::init(true), cl::Hidden);

static cl::opt<bool>
    UseBlockFreqInfo("machine-sink-bfi",
                     cl::desc("Use block frequency info to find successors to sink"),
                     cl::init(true), cl::Hidden);

static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
    "machine-sink-split-probability-threshold",
    cl::desc(
        "Percentage threshold for splitting single-instruction critical edge. "
        "If the branch threshold is higher than this threshold, we allow "
        "speculative execution of up to 1 instruction to avoid branching to "
        "splitted critical edge"),
    cl::init(40), cl::Hidden);

static cl::opt<unsigned> SinkLoadInstsPerBlockThreshold(
    "machine-sink-load-instrs-threshold",
    cl::desc("Do not try to find alias store for a load if there is a in-path "
             "block whose instruction number is higher than this threshold."),
    cl::init(2000), cl::Hidden);

static cl::opt<unsigned> SinkLoadBlocksThreshold(
    "machine-sink-load-blocks-threshold",
    cl::desc("Do not try to find alias store for a load if the block number in "
             "the straight line is higher than this threshold."),
    cl::init(20), cl::Hidden);

static cl::opt<bool>
    SinkInstsIntoCycle("sink-insts-to-avoid-spills",
                       cl::desc("Sink instructions into cycles to avoid "
                                "register spills"),
                       cl::init(false), cl::Hidden);

static cl::opt<unsigned> SinkIntoCycleLimit(
    "machine-sink-cycle-limit",
    cl::desc("The maximum number of instructions considered for cycle sinking."),
    cl::init(50), cl::Hidden);

110STATISTIC(NumSunk, "Number of machine instructions sunk");

111STATISTIC(NumCycleSunk, "Number of machine instructions sunk into a cycle");

112STATISTIC(NumSplit, "Number of critical edges split");

113STATISTIC(NumCoalesces, "Number of copies coalesced");

114STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");

115

116namespace {

117

131

132

134 CEBCandidates;

135

136

137

138

139

140

142 CEMergeCandidates;

143

144

145

147

149

150 using AllSuccsCache =

152

153

154

155

156

157

158

159

160

161

163

164

165

167

168

169

171

173 HasStoreCache;

174

177 StoreInstrCache;

178

179

181 CachedRegisterPressure;

182

183 bool EnableSinkAndFold;

184

185public:

186 static char ID;

187

190 }

191

193

207 }

208

210 CEBCandidates.clear();

211 CEMergeCandidates.clear();

212 }

213

214private:

222

225

226

227

228

229

230

231

232

233

234

235

236

237

241 AllSuccsCache &AllSuccessors);

242

243

244

245

246

247 void SalvageUnsunkDebugUsersOfCopy(MachineInstr &,

251 bool &LocalUse) const;

253 bool &BreakPHIEdge,

254 AllSuccsCache &AllSuccessors);

255

259

263 AllSuccsCache &AllSuccessors);

264

265 bool PerformTrivialForwardCoalescing(MachineInstr &MI,

267

269

272 AllSuccsCache &AllSuccessors) const;

273

275

276 bool registerPressureSetExceedsLimit(unsigned NRegs,

279};

280

281}

282

283char MachineSinking::ID = 0;

284

286

288 false)

296

297

298

306 ++PI) {

307

308 if (TII->isBasicBlockPrologue(*PI))

309 continue;

310 for (auto &MO : MI.operands()) {

311 if (!MO.isReg())

312 continue;

314 if (!Reg)

315 continue;

316 if (MO.isUse()) {

317 if (Reg.isPhysical() &&

318 (TII->isIgnorableUse(MO) || (MRI && MRI->isConstantPhysReg(Reg))))

319 continue;

320 if (PI->modifiesRegister(Reg, TRI))

321 return true;

322 } else {

323 if (PI->readsRegister(Reg, TRI))

324 return true;

325

326 auto *DefOp = PI->findRegisterDefOperand(Reg, TRI, false, true);

327 if (DefOp && !DefOp->isDead())

328 return true;

329 }

330 }

331 }

332

333 return false;

334}
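// A minimal standalone sketch of the interference test above, using toy
// def/use sets instead of MachineOperands. All names here (ToyInstr,
// toyPrologueInterferes) are hypothetical; this only illustrates the rule:
// a candidate cannot be placed after a prologue instruction that writes a
// register the candidate reads, or that still reads a register the
// candidate would write.
#include <set>
struct ToyInstr {
  std::set<int> Defs; // registers written
  std::set<int> Uses; // registers read
};
static bool toyPrologueInterferes(const ToyInstr &Prologue,
                                  const ToyInstr &Candidate) {
  for (int R : Candidate.Uses)
    if (Prologue.Defs.count(R))
      return true; // prologue writes something the candidate reads
  for (int R : Candidate.Defs)
    if (Prologue.Uses.count(R) || Prologue.Defs.count(R))
      return true; // candidate would clobber a value the prologue still needs
  return false;
}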

335

336bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,

338 if (!MI.isCopy())

339 return false;

340

341 Register SrcReg = MI.getOperand(1).getReg();

342 Register DstReg = MI.getOperand(0).getReg();

344 !MRI->hasOneNonDBGUse(SrcReg))

345 return false;

346

349 if (SRC != DRC)

350 return false;

351

354 return false;

357 MRI->replaceRegWith(DstReg, SrcReg);

358 MI.eraseFromParent();

359

360

361

362 MRI->clearKillFlags(SrcReg);

363

364 ++NumCoalesces;

365 return true;

366}
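// Hedged sketch (hypothetical ToyOp type, not LLVM's MachineInstr): forward
// coalescing a copy "dst = COPY src" means rewriting every later use of dst
// to use src directly and erasing the now-dead copy, which is what the
// function above does for the single-use, same-register-class case.
#include <cstddef>
#include <vector>
struct ToyOp { int Opcode; std::vector<int> Regs; }; // Regs[0] is the def
static void toyCoalesceCopy(std::vector<ToyOp> &Block, std::size_t CopyIdx,
                            int DstReg, int SrcReg) {
  for (std::size_t I = CopyIdx + 1; I < Block.size(); ++I)
    for (int &R : Block[I].Regs)
      if (R == DstReg)
        R = SrcReg;                                     // use the source directly
  Block.erase(Block.begin() + static_cast<long>(CopyIdx)); // drop the dead copy
}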

367

368bool MachineSinking::PerformSinkAndFold(MachineInstr &MI,

370 if (MI.isCopy() || MI.mayLoadOrStore() ||

371 MI.getOpcode() == TargetOpcode::REG_SEQUENCE)

372 return false;

373

374

376 return false;

377

378

379 bool SawStore = true;

380 if (!MI.isSafeToMove(SawStore))

381 return false;

382

383

384

385 if (MI.isConvergent())

386 return false;

387

388

389

390

391

395 if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||

396 MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||

397 MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())

398 continue;

399 if (!MO.isReg())

400 return false;

401

403 if (Reg == 0)

404 continue;

405

406 if (Reg.isVirtual()) {

407 if (MO.isDef()) {

408 if (DefReg)

409 return false;

410 DefReg = Reg;

411 continue;

412 }

413

414 if (UsedRegA == 0)

415 UsedRegA = Reg;

416 else if (UsedRegB == 0)

417 UsedRegB = Reg;

418 else

419 return false;

420 continue;

421 }

422

423 if (Reg.isPhysical() && MO.isUse() &&

424 (MRI->isConstantPhysReg(Reg) || TII->isIgnorableUse(MO)))

425 continue;

426

427 return false;

428 }

429

430

431

432

433

434 using SinkInfo = std::pair<MachineInstr *, ExtAddrMode>;

437

440 UsedRegA == 0 ? nullptr : MRI->getRegClass(UsedRegA);

442 UsedRegB == 0 ? nullptr : MRI->getRegClass(UsedRegB);

443

445 while (!Worklist.empty()) {

447

451 if (UseInst.isCopy()) {

454 DstReg = O.getReg();

455 if (DstReg == 0)

456 return false;

459 continue;

460 }

461

462

464 return false;

465

466

468 return false;

471 if (!TII->canFoldIntoAddrMode(UseInst, Reg, MI, AM))

472 return false;

473 MaybeAM = AM;

474 } else {

475 return false;

476 }

477

478 if (UseInst.getParent() != MI.getParent()) {

479

480

481

484 RCA = nullptr;

486 RCB = nullptr;

487 if (RCA || RCB) {

488 if (RCA == nullptr) {

489 RCA = RCB;

490 RCB = nullptr;

491 }

492

493 unsigned NRegs = !!RCA + !!RCB;

494 if (RCA == RCB)

495 RCB = nullptr;

496

497

499 if (RCB == nullptr) {

500 if (registerPressureSetExceedsLimit(NRegs, RCA, MBB))

501 return false;

502 } else if (registerPressureSetExceedsLimit(1, RCA, MBB) ||

503 registerPressureSetExceedsLimit(1, RCB, MBB)) {

504 return false;

505 }

506 }

507 }

508

510 }

511 }

512

513 if (SinkInto.empty())

514 return false;

515

516

517 for (auto &[SinkDst, MaybeAM] : SinkInto) {

520 SinkDst->dump());

521 if (SinkDst->isCopy()) {

522

523

524

525

526

527

528

529

530

531

532

534 Register DstReg = SinkDst->getOperand(0).getReg();

535 TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0, MI, *TRI);

536 New = &*std::prev(InsertPt);

537 if (New->getDebugLoc())

538 New->setDebugLoc(SinkDst->getDebugLoc());

539

540

541

542

543 if (UsedRegA)

544 MRI->clearKillFlags(UsedRegA);

545 if (UsedRegB)

546 MRI->clearKillFlags(UsedRegB);

547 } else {

548

549 New = TII->emitLdStWithAddr(*SinkDst, MaybeAM);

550

551

552

553

555 MRI->clearKillFlags(R);

557 MRI->clearKillFlags(R);

558 }

560

561 if (SinkDst->mayStore() && !SinkDst->hasOrderedMemoryRef())

562 StoreInstrCache.clear();

563 SinkDst->eraseFromParent();

564 }

565

566

567

568

571 while (!Worklist.empty()) {

575 assert((U->isCopy() || U->isDebugInstr()) &&

576 "Only debug uses and copies must remain");

577 if (U->isCopy())

578 Worklist.push_back(U->getOperand(0).getReg());

580 }

581 }

582

583

586 if (I->isCopy()) {

587 I->eraseFromParent();

588 } else {

589 MO->setReg(0);

590 MO->setSubReg(0);

591 }

592 }

593

594 MI.eraseFromParent();

595 return true;

596}
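// Illustrative only: the transformation above folds a sunk value into the
// addressing mode of its user, e.g. turning
//   %a = ADD %base, 16      ; candidate in block A
//   %v = LOAD %a            ; user in block B
// into a single "load from %base + 16" in block B, so nothing is executed on
// paths that never reach the load. The struct and function below are
// hypothetical stand-ins loosely mirroring ExtAddrMode; they only model a
// base-plus-displacement pattern, not the real TargetInstrInfo hooks.
struct ToyAddrMode { int BaseReg = 0; long Displacement = 0; };
static bool toyFoldAddIntoAddrMode(int AddBase, long AddImm, ToyAddrMode &AM) {
  // In this simplified model, folding only works if the memory instruction
  // currently addresses the ADD result directly, with no displacement yet.
  if (AM.Displacement != 0)
    return false;
  AM.BaseReg = AddBase;
  AM.Displacement = AddImm;
  return true;
}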

597

598

599

600

601

602bool MachineSinking::AllUsesDominatedByBlock(Register Reg,

605 bool &BreakPHIEdge,

606 bool &LocalUse) const {

607 assert(Reg.isVirtual() && "Only makes sense for vregs");

608

609

610 if (MRI->use_nodbg_empty(Reg))

611 return true;

612

613

614

615

616

617

618

619

620

621

622

623

624

625

626

628 MachineInstr *UseInst = MO.getParent();

629 unsigned OpNo = MO.getOperandNo();

630 MachineBasicBlock *UseBlock = UseInst->getParent();

631 return UseBlock == MBB && UseInst->isPHI() &&

632 UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;

633 })) {

634 BreakPHIEdge = true;

635 return true;

636 }

637

639

641 unsigned OpNo = &MO - &UseInst->getOperand(0);

643 if (UseInst->isPHI()) {

644

645

647 } else if (UseBlock == DefMBB) {

648 LocalUse = true;

649 return false;

650 }

651

652

654 return false;

655 }

656

657 return true;

658}
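// Standalone sketch of the dominance requirement checked above: sinking to a
// block is only allowed if every use is dominated by it, so the value is
// always computed before it is needed. Blocks are integers and the immediate
// dominator array stands in for MachineDominatorTree; both names are
// hypothetical. Convention: the entry block is its own immediate dominator.
#include <vector>
static bool toyDominates(int A, int B, const std::vector<int> &IDom) {
  while (B != A && IDom[B] != B)
    B = IDom[B]; // climb the dominator tree from B towards the entry
  return B == A;
}
static bool toyAllUsesDominatedBy(int SinkBlock,
                                  const std::vector<int> &UseBlocks,
                                  const std::vector<int> &IDom) {
  for (int UB : UseBlocks)
    if (!toyDominates(SinkBlock, UB, IDom))
      return false; // some use could execute without passing through SinkBlock
  return true;
}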

659

660

661

663 assert(MI.mayLoad() && "Expected MI that loads!");

664

665

666

667 if (MI.memoperands_empty())

668 return true;

669

672 if (PSV->isGOT() || PSV->isConstantPool())

673 return true;

674

675 return false;

676}

677

678void MachineSinking::FindCycleSinkCandidates(

681 for (auto &MI : *BB) {

682 LLVM_DEBUG(dbgs() << "CycleSink: Analysing candidate: " << MI);

684 LLVM_DEBUG(dbgs() << "CycleSink: Instruction not a candidate for this "

685 "target\n");

686 continue;

687 }

689 LLVM_DEBUG(dbgs() << "CycleSink: Instruction is not cycle invariant\n");

690 continue;

691 }

692 bool DontMoveAcrossStore = true;

693 if (!MI.isSafeToMove(DontMoveAcrossStore)) {

694 LLVM_DEBUG(dbgs() << "CycleSink: Instruction not safe to move.\n");

695 continue;

696 }

698 LLVM_DEBUG(dbgs() << "CycleSink: Dont sink GOT or constant pool loads\n");

699 continue;

700 }

701 if (MI.isConvergent())

702 continue;

703

706 continue;

707 if (MRI->hasOneDef(MO.getReg()))

708 continue;

709

710 LLVM_DEBUG(dbgs() << "CycleSink: Instruction added as candidate.\n");

712 }

713}

714

715bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {

717 return false;

718

719 LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");

720

DT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
PDT = &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
CI = &getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
MBFI = UseBlockFreqInfo
           ? &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI()
           : nullptr;
MBPI = &getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI();
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

737

738 bool EverMadeChange = false;

739

740 while (true) {

741 bool MadeChange = false;

742

743

744 CEBCandidates.clear();

745 CEMergeCandidates.clear();

747 for (auto &MBB : MF)

749

750

752 MachineDomTreeUpdater::UpdateStrategy::Lazy);

753 for (const auto &Pair : ToSplit) {

754 auto NewSucc =

755 Pair.first->SplitCriticalEdge(Pair.second, *this, nullptr, &MDTU);

756 if (NewSucc != nullptr) {

757 LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "

761 if (MBFI)

762 MBFI->onEdgeSplit(*Pair.first, *NewSucc, *MBPI);

763

764 MadeChange = true;

765 ++NumSplit;

767 } else

768 LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");

769 }

770

771 if (!MadeChange)

772 break;

773 EverMadeChange = true;

774 }

775

778 for (auto *Cycle : Cycles) {

780 if (!Preheader) {

781 LLVM_DEBUG(dbgs() << "CycleSink: Can't find preheader\n");

782 continue;

783 }

785 FindCycleSinkCandidates(Cycle, Preheader, Candidates);

786

787

788

789

790 unsigned i = 0;

793 LLVM_DEBUG(dbgs() << "CycleSink: Limit reached of instructions to "

794 "be analysed.");

795 break;

796 }

797

798 if (!SinkIntoCycle(Cycle, *I))

799 break;

800 EverMadeChange = true;

801 ++NumCycleSunk;

802 }

803 }

804 }

805

806 HasStoreCache.clear();

807 StoreInstrCache.clear();

808

809

810 for (auto I : RegsToClearKillFlags)

811 MRI->clearKillFlags(I);

812 RegsToClearKillFlags.clear();

813

814 return EverMadeChange;

815}
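// The driver above alternates sinking over all blocks with deferred critical
// edge splitting until nothing changes. Below is a toy version of the edge
// split itself, on a plain adjacency-list CFG with hypothetical names: a
// critical edge Pred->Succ is replaced by Pred->New->Succ so instructions can
// be sunk into New without executing on Pred's other successor paths.
#include <vector>
using ToyCFG = std::vector<std::vector<int>>; // Succs[b] = successor list of b
static int toySplitEdge(ToyCFG &Succs, int Pred, int Succ) {
  int NewBB = static_cast<int>(Succs.size());
  Succs.push_back({Succ});   // the new block falls through to Succ
  for (int &S : Succs[Pred])
    if (S == Succ)
      S = NewBB;             // reroute Pred's edge through the new block
  return NewBB;
}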

816

819 return false;

820

821

822

823

825 return false;

826

827 bool MadeChange = false;

828

829

830 AllSuccsCache AllSuccessors;

831

832

834 --I;

835 bool ProcessedBegin, SawStore = false;

836 do {

838

839

840

841 ProcessedBegin = I == MBB.begin();

842 if (!ProcessedBegin)

843 --I;

844

845 if (MI.isDebugOrPseudoInstr() || MI.isFakeUse()) {

846 if (MI.isDebugValue())

847 ProcessDbgInst(MI);

848 continue;

849 }

850

851 if (EnableSinkAndFold && PerformSinkAndFold(MI, &MBB)) {

852 MadeChange = true;

853 continue;

854 }

855

856

858 continue;

859

860 if (PerformTrivialForwardCoalescing(MI, &MBB)) {

861 MadeChange = true;

862 continue;

863 }

864

866 ++NumSunk;

867 MadeChange = true;

868 }

869

870

871 } while (!ProcessedBegin);

872

873 SeenDbgUsers.clear();

874 SeenDbgVars.clear();

875

876 CachedRegisterPressure.clear();

877 return MadeChange;

878}

879

880void MachineSinking::ProcessDbgInst(MachineInstr &MI) {

881

882

883 assert(MI.isDebugValue() && "Expected DBG_VALUE for processing");

884

885 DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),

886 MI.getDebugLoc()->getInlinedAt());

887 bool SeenBefore = SeenDbgVars.contains(Var);

888

891 SeenDbgUsers[MO.getReg()].push_back(SeenDbgUser(&MI, SeenBefore));

892 }

893

894

895 SeenDbgVars.insert(Var);

896}

897

898bool MachineSinking::isWorthBreakingCriticalEdge(

901

902

903

904

905

906 if (!CEBCandidates.insert(std::make_pair(From, To)).second)

907 return true;

908

910 return true;

911

912

913

914

915

916

917 for (const auto &MO : MI.all_defs()) {

919 if (!Reg)

920 continue;

922 auto Key = std::make_pair(SrcReg, To);

924

925

926 if (!Res.second) {

927

928 DeferredFromBlock = Res.first->second;

929 return true;

930 }

931 }

932

933 if (From->isSuccessor(To) &&

936 return true;

937

938

939

940

943 if (Reg == 0)

944 continue;

945

946

947

948 if (Reg.isPhysical())

949 continue;

950

951

952

953

954 if (MRI->hasOneNonDBGUse(Reg)) {

955

956

957

958

961 return true;

962 }

963 }

964

965

966

967 return TII->shouldBreakCriticalEdgeToSink(MI);

968}

969

970bool MachineSinking::isLegalToBreakCriticalEdge(MachineInstr &MI,

973 bool BreakPHIEdge) {

974

976 return false;

977

980

981

982 if (FromCycle == ToCycle && FromCycle &&

984 return false;

985

986

987

988

989

990

991

992

993

994

995

996

997

998

999

1000

1001

1002

1003

1004

1005

1006

1007

1008

1009

1010

1011

1012

1013

1014

1015

1016

1017

1018

1019

1020

1021

1022

1023

1024

1025 if (!BreakPHIEdge) {

1027 if (Pred != FromBB && !DT->dominates(ToBB, Pred))

1028 return false;

1029 }

1030

1031 return true;

1032}

1033

1034bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,

1037 bool BreakPHIEdge) {

1038 bool Status = false;

1040 if (isWorthBreakingCriticalEdge(MI, FromBB, ToBB, DeferredFromBB)) {

1041

1042

1043 if ((!DeferredFromBB ||

1044 ToSplit.count(std::make_pair(DeferredFromBB, ToBB)) ||

1045 isLegalToBreakCriticalEdge(MI, DeferredFromBB, ToBB, BreakPHIEdge)) &&

1046 isLegalToBreakCriticalEdge(MI, FromBB, ToBB, BreakPHIEdge)) {

1047 ToSplit.insert(std::make_pair(FromBB, ToBB));

1048 if (DeferredFromBB)

1049 ToSplit.insert(std::make_pair(DeferredFromBB, ToBB));

1051 }

1052 }

1053

1055}

1056

1057std::vector &

1059

1060

1061

1062

1063 auto RP = CachedRegisterPressure.find(&MBB);

1064 if (RP != CachedRegisterPressure.end())

1065 return RP->second;

1066

1069

1070

1072 false, true);

1073

1076 MII != MIE; --MII) {

1078 if (MI.isDebugInstr() || MI.isPseudoProbe())

1079 continue;

1082 RPTracker.recedeSkipDebugValues();

1083 assert(&*RPTracker.getPos() == &MI && "RPTracker sync error!");

1084 RPTracker.recede(RegOpers);

1085 }

1086

1087 RPTracker.closeRegion();

1088 auto It = CachedRegisterPressure.insert(

1089 std::make_pair(&MBB, RPTracker.getPressure().MaxSetPressure));

1090 return It.first->second;

1091}

1092

1093bool MachineSinking::registerPressureSetExceedsLimit(

1096 unsigned Weight = NRegs * TRI->getRegClassWeight(RC).RegWeight;

1097 const int *PS = TRI->getRegClassPressureSets(RC);

1098 std::vector BBRegisterPressure = getBBRegisterPressure(MBB);

1099 for (; *PS != -1; PS++)

1100 if (Weight + BBRegisterPressure[*PS] >=

1102 return true;

1103 return false;

1104}
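// Sketch of the caching pattern used by getBBRegisterPressure and
// registerPressureSetExceedsLimit: compute a block's maximum set pressure
// once, reuse it for every later query, and reject a sink if adding NRegs of
// a given weight would reach the limit. The types, the fixed limit, and the
// pressure computation itself are placeholders, not the RegPressureTracker.
#include <unordered_map>
struct ToyPressureCache {
  std::unordered_map<int, unsigned> MaxPressure; // block id -> cached value
  unsigned Limit = 32;                           // hypothetical set limit
  unsigned get(int BB) {
    auto It = MaxPressure.find(BB);
    if (It != MaxPressure.end())
      return It->second;     // cache hit: the block is not scanned again
    unsigned Computed = 0;   // stand-in for the bottom-up pressure walk
    return MaxPressure[BB] = Computed;
  }
  bool exceedsLimit(int BB, unsigned NRegs, unsigned RegWeight) {
    return NRegs * RegWeight + get(BB) >= Limit;
  }
};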

1105

1106

1110 AllSuccsCache &AllSuccessors) {

1111 assert(SuccToSinkTo && "Invalid SinkTo Candidate BB");

1112

1113 if (MBB == SuccToSinkTo)

1114 return false;

1115

1116

1118 return true;

1119

1120

1121

1123 return true;

1124

1125

1126 bool NonPHIUse = false;

1129 if (UseBlock == SuccToSinkTo && !UseInst.isPHI())

1130 NonPHIUse = true;

1131 }

1132 if (!NonPHIUse)

1133 return true;

1134

1135

1136

1137 bool BreakPHIEdge = false;

1138

1140 FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))

1141 return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);

1142

1144

1145

1146

1147 if (!MCycle)

1148 return false;

1149

1150

1151

1153

1154 if (!MO.isReg())

1155 continue;

1157 if (Reg == 0)

1158 continue;

1159

1160 if (Reg.isPhysical()) {

1161

1162 if (MO.isUse() && MRI->isConstantPhysReg(Reg) &&

1163 TII->isIgnorableUse(MO))

1164 return false;

1165 continue;

1166 }

1167

1168

1169 if (MO.isDef()) {

1170

1171 bool LocalUse = false;

1172 if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge,

1173 LocalUse))

1174 return false;

1175 } else {

1178 continue;

1180

1181

1182

1183

1186 continue;

1187

1188

1189

1190 if (registerPressureSetExceedsLimit(1, MRI->getRegClass(Reg),

1191 *SuccToSinkTo)) {

1192 LLVM_DEBUG(dbgs() << "register pressure exceed limit, not profitable.");

1193 return false;

1194 }

1195 }

1196 }

1197

1198

1199

1200

1201 return true;

1202}

1203

1204

1205

1208 AllSuccsCache &AllSuccessors) const {

1209

1210 auto Succs = AllSuccessors.find(MBB);

1211 if (Succs != AllSuccessors.end())

1212 return Succs->second;

1213

1215

1216

1217

1218

1219

1220

1221

1222

1224

1225 if (DTChild->getIDom()->getBlock() == MI.getParent() &&

1226

1228 AllSuccs.push_back(DTChild->getBlock());

1229 }

1230

1231

1237 (!LHSFreq && !RHSFreq))

1239 return LHSFreq < RHSFreq;

1240 });

1241

1242 auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));

1243

1244 return it.first->second;

1245}
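// Minimal sketch of the ordering used above: candidate successors (plus
// dominator-tree children) are preferred coldest first, keeping the original
// order for ties, hence the stable sort. Frequencies here are a plain map
// with hypothetical names, not MachineBlockFrequencyInfo.
#include <algorithm>
#include <cstdint>
#include <unordered_map>
#include <vector>
static void toySortByFrequency(
    std::vector<int> &Blocks,
    const std::unordered_map<int, std::uint64_t> &Freq) {
  std::stable_sort(Blocks.begin(), Blocks.end(), [&](int L, int R) {
    return Freq.at(L) < Freq.at(R); // colder blocks first
  });
}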

1246

1247

1250 bool &BreakPHIEdge,

1251 AllSuccsCache &AllSuccessors) {

1252 assert(MBB && "Invalid MachineBasicBlock!");

1253

1254

1255

1256

1257

1258

1261 if (!MO.isReg())

1262 continue;

1263

1265 if (Reg == 0)

1266 continue;

1267

1268 if (Reg.isPhysical()) {

1269 if (MO.isUse()) {

1270

1271

1272

1273 if (MRI->isConstantPhysReg(Reg) && TII->isIgnorableUse(MO))

1274 return nullptr;

1275 } else if (!MO.isDead()) {

1276

1277 return nullptr;

1278 }

1279 } else {

1280

1282 continue;

1283

1284

1285 if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))

1286 return nullptr;

1287

1288

1289

1290 if (SuccToSinkTo) {

1291

1292

1293 bool LocalUse = false;

1294 if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge,

1295 LocalUse))

1296 return nullptr;

1297

1298 continue;

1299 }

1300

1301

1302

1303

1304

1306 GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {

1307 bool LocalUse = false;

1308 if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB, BreakPHIEdge,

1309 LocalUse)) {

1310 SuccToSinkTo = SuccBlock;

1311 break;

1312 }

1313 if (LocalUse)

1314

1315 return nullptr;

1316 }

1317

1318

1319 if (!SuccToSinkTo)

1320 return nullptr;

1321 if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))

1322 return nullptr;

1323 }

1324 }

1325

1326

1327

1328 if (MBB == SuccToSinkTo)

1329 return nullptr;

1330

1331

1332

1333 if (SuccToSinkTo && SuccToSinkTo->isEHPad())

1334 return nullptr;

1335

1336

1337

1338

1339

1341 return nullptr;

1342

1343 if (SuccToSinkTo && !TII->isSafeToSink(MI, SuccToSinkTo, CI))

1344 return nullptr;

1345

1346 return SuccToSinkTo;

1347}

1348

1349

1350

1351

1352

1353

1354

1355

1360

1361 auto *MBB = MI.getParent();

1363 return false;

1364

1366 auto *PredBB = PredMBB->getBasicBlock();

1367

1368

1369

1370

1371 if (!PredBB ||

1372 !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))

1373 return false;

1374

1377 bool OffsetIsScalable;

1378 if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, TRI))

1379 return false;

1380

1381 if (!BaseOp->isReg())

1382 return false;

1383

1384 if (!(MI.mayLoad() && MI.isPredicable()))

1385 return false;

1386

1387 MachineBranchPredicate MBP;

1388 if (TII->analyzeBranchPredicate(*PredMBB, MBP, false))

1389 return false;

1390

1391 return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&

1392 (MBP.Predicate == MachineBranchPredicate::PRED_NE ||

1393 MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&

1394 MBP.LHS.getReg() == BaseOp->getReg();

1395}

1396

1397

1398

1399

1400

1405

1406

1407

1408

1409

1410 const MachineOperand *SrcMO = nullptr, *DstMO = nullptr;

1411 auto CopyOperands = TII.isCopyInstr(SinkInst);

1412 if (!CopyOperands)

1413 return false;

1414 SrcMO = CopyOperands->Source;

1415 DstMO = CopyOperands->Destination;

1416

1417

1418 bool PostRA = MRI.getNumVirtRegs() == 0;

1419

1420

1422 return false;

1423

1424

1425

1426 bool arePhysRegs = !Reg.isVirtual();

1427 if (arePhysRegs != PostRA)

1428 return false;

1429

1430

1431

1432 if (!PostRA)

1434 if (DbgMO.getSubReg() != SrcMO->getSubReg() ||

1435 DbgMO.getSubReg() != DstMO->getSubReg())

1436 return false;

1437

1438

1439

1440

1441 if (PostRA && Reg != DstMO->getReg())

1442 return false;

1443

1445 DbgMO.setReg(SrcMO->getReg());

1446 DbgMO.setSubReg(SrcMO->getSubReg());

1447 }

1448 return true;

1449}
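// Hedged sketch of the copy-forwarding idea for debug info: if "dst = COPY src"
// is sunk away, a DBG_VALUE that referred to dst can keep describing the
// variable by switching to src (when the sub-registers line up), instead of
// being set to undef. ToyCopy and ToyDbgValue are illustrative stand-ins, not
// the MachineOperand interfaces used above.
struct ToyCopy { int Dst, Src, DstSub, SrcSub; };
struct ToyDbgValue { int Reg, SubReg; };
static bool toyForwardDebugCopy(const ToyCopy &C, ToyDbgValue &DV) {
  if (DV.Reg != C.Dst)
    return false;   // the debug value does not describe the copy destination
  if (DV.SubReg != C.DstSub || DV.SubReg != C.SrcSub)
    return false;   // sub-register mismatch: safer to give up
  DV.Reg = C.Src;   // describe the variable via the copy source instead
  DV.SubReg = C.SrcSub;
  return true;
}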

1450

1451using MIRegs = std::pair<MachineInstr *, SmallVector<unsigned, 2>>;

1452

1456

1457

1458

1459 if (!SuccToSinkTo.empty() && InsertPos != SuccToSinkTo.end())

1461 InsertPos->getDebugLoc()));

1462 else

1464

1465

1467 SuccToSinkTo.splice(InsertPos, ParentBlock, MI,

1469

1470

1471

1472

1473

1474 for (const auto &DbgValueToSink : DbgValuesToSink) {

1477 SuccToSinkTo.insert(InsertPos, NewDbgMI);

1478

1479 bool PropagatedAllSunkOps = true;

1480 for (unsigned Reg : DbgValueToSink.second) {

1483 PropagatedAllSunkOps = false;

1484 break;

1485 }

1486 }

1487 }

1488 if (!PropagatedAllSunkOps)

1490 }

1491}

1492

1493

1494

1497

1498

1500 return true;

1501

1502 auto BlockPair = std::make_pair(From, To);

1503

1504

1505

1506 if (auto It = HasStoreCache.find(BlockPair); It != HasStoreCache.end())

1507 return It->second;

1508

1509 if (auto It = StoreInstrCache.find(BlockPair); It != StoreInstrCache.end())

1511 return I->mayAlias(AA, MI, false);

1512 });

1513

1514 bool SawStore = false;

1515 bool HasAliasedStore = false;

1518

1520

1521

1522

1523

1524 if (BB == To || BB == From)

1525 continue;

1526

1527

1528 if (HandledBlocks.count(BB))

1529 continue;

1530

1531 HandledBlocks.insert(BB);

1532

1534 if (!HandledDomBlocks.count(BB))

1535 HandledDomBlocks.insert(BB);

1536

1537

1538

1541 for (auto *DomBB : HandledDomBlocks) {

1542 if (DomBB != BB && DT->dominates(DomBB, BB))

1543 HasStoreCache[std::make_pair(DomBB, To)] = true;

1544 else if (DomBB != BB && DT->dominates(BB, DomBB))

1545 HasStoreCache[std::make_pair(From, DomBB)] = true;

1546 }

1547 HasStoreCache[BlockPair] = true;

1548 return true;

1549 }

1550

1552

1553

1554 if (I.isCall() || I.hasOrderedMemoryRef()) {

1555 for (auto *DomBB : HandledDomBlocks) {

1556 if (DomBB != BB && DT->dominates(DomBB, BB))

1557 HasStoreCache[std::make_pair(DomBB, To)] = true;

1558 else if (DomBB != BB && DT->dominates(BB, DomBB))

1559 HasStoreCache[std::make_pair(From, DomBB)] = true;

1560 }

1561 HasStoreCache[BlockPair] = true;

1562 return true;

1563 }

1564

1565 if (I.mayStore()) {

1566 SawStore = true;

1567

1568

1569

1570

1571 if (I.mayAlias(AA, MI, false))

1572 HasAliasedStore = true;

1573 StoreInstrCache[BlockPair].push_back(&I);

1574 }

1575 }

1576 }

1577 }

1578

1579 if (!SawStore)

1580 HasStoreCache[BlockPair] = false;

1581 return HasAliasedStore;

1582}
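// Simplified shape of the store scan above, with hypothetical names: walk the
// blocks strictly between From and To, give up on anything with unknown memory
// effects, and note any store that may alias the load. Unlike the real code,
// which keeps two caches (HasStoreCache and StoreInstrCache), this variant
// simply caches the final answer per (From, To) pair; aliasing is reduced to
// a callback.
#include <functional>
#include <map>
#include <utility>
#include <vector>
struct ToyBlockInfo { bool HasCallOrFence; std::vector<int> StoreIds; };
static bool toyHasAliasedStoreBetween(
    const std::vector<int> &Path, // blocks strictly between From and To
    const std::vector<ToyBlockInfo> &Blocks,
    const std::function<bool(int)> &AliasesLoad,
    std::map<std::pair<int, int>, bool> &Cache, std::pair<int, int> Key) {
  if (auto It = Cache.find(Key); It != Cache.end())
    return It->second;
  bool Aliased = false;
  for (int BB : Path) {
    if (Blocks[BB].HasCallOrFence)
      return Cache[Key] = true; // unknown side effects: treat as blocking
    for (int S : Blocks[BB].StoreIds)
      if (AliasesLoad(S))
        Aliased = true;
  }
  return Cache[Key] = Aliased;
}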

1583

1584

1585

1586

1588 LLVM_DEBUG(dbgs() << "CycleSink: Finding sink block for: " << I);

1590 assert(Preheader && "Cycle sink needs a preheader block");

1592 bool CanSink = true;

1594

1598 LLVM_DEBUG(dbgs() << "CycleSink: Use not in cycle, can't sink.\n");

1599 CanSink = false;

1600 break;

1601 }

1602

1603

1604

1605

1606

1607 if (!MI.isCopy()) {

1608 LLVM_DEBUG(dbgs() << "CycleSink: Use is not a copy\n");

1609 CanSink = false;

1610 break;

1611 }

1612 if (!SinkBlock) {

1613 SinkBlock = MI.getParent();

1614 LLVM_DEBUG(dbgs() << "CycleSink: Setting sink block to: "

1616 continue;

1617 }

1619 if (!SinkBlock) {

1620 LLVM_DEBUG(dbgs() << "CycleSink: Can't find nearest dominator\n");

1621 CanSink = false;

1622 break;

1623 }

1624 LLVM_DEBUG(dbgs() << "CycleSink: Setting nearest common dom block: "

1626 }

1627

1628 if (!CanSink) {

1629 LLVM_DEBUG(dbgs() << "CycleSink: Can't sink instruction.\n");

1630 return false;

1631 }

1632 if (!SinkBlock) {

1633 LLVM_DEBUG(dbgs() << "CycleSink: Not sinking, can't find sink block.\n");

1634 return false;

1635 }

1636 if (SinkBlock == Preheader) {

1638 dbgs() << "CycleSink: Not sinking, sink block is the preheader\n");

1639 return false;

1640 }

1643 dbgs() << "CycleSink: Not Sinking, block too large to analyse.\n");

1644 return false;

1645 }

1646

1647 LLVM_DEBUG(dbgs() << "CycleSink: Sinking instruction!\n");

1649 I);

1650

1651

1654 RegsToClearKillFlags.insert(MO.getReg());

1655 }

1656

1657

1658

1659 assert(I.isDebugInstr() && "Should not sink debug inst");

1661 return true;

1662}
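// Sketch of the placement decision in SinkIntoCycle: the instruction is moved
// to the nearest block that dominates every in-cycle use (and that is not the
// preheader). Immediate dominators and depths are plain arrays here, and the
// nearest-common-dominator walk is done naively for clarity; all names are
// hypothetical. UseBlocks is assumed non-empty.
#include <vector>
static int toyNearestCommonDom(int A, int B, const std::vector<int> &IDom,
                               const std::vector<int> &Depth) {
  while (A != B) {
    if (Depth[A] < Depth[B])
      B = IDom[B]; // B is deeper, move it up
    else
      A = IDom[A];
  }
  return A;
}
static int toySinkBlockForUses(const std::vector<int> &UseBlocks,
                               const std::vector<int> &IDom,
                               const std::vector<int> &Depth) {
  int Sink = UseBlocks.front();
  for (int UB : UseBlocks)
    Sink = toyNearestCommonDom(Sink, UB, IDom, Depth);
  return Sink;
}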

1663

1664

1665

1666bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,

1667 AllSuccsCache &AllSuccessors) {

1668

1670 return false;

1671

1672

1673 if (!MI.isSafeToMove(SawStore))

1674 return false;

1675

1676

1677

1678 if (MI.isConvergent())

1679 return false;

1680

1681

1682

1684 return false;

1685

1686

1687

1688

1689

1690

1691

1692

1693

1694 bool BreakPHIEdge = false;

1697 FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);

1698

1699

1700 if (!SuccToSinkTo)

1701 return false;

1702

1703

1704

1705

1708 if (Reg == 0 || Reg.isPhysical())

1709 continue;

1711 return false;

1712 }

1713

1714 LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);

1715

1716

1717

1718 if (SuccToSinkTo->pred_size() > 1) {

1719

1720

1721 bool TryBreak = false;

1723 MI.mayLoad() ? hasStoreBetween(ParentBlock, SuccToSinkTo, MI) : true;

1724 if (!MI.isSafeToMove(Store)) {

1725 LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");

1726 TryBreak = true;

1727 }

1728

1729

1730

1731 if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {

1732 LLVM_DEBUG(dbgs() << " *** NOTE: Critical edge found\n");

1733 TryBreak = true;

1734 }

1735

1736

1737 if (!TryBreak && CI->getCycle(SuccToSinkTo) &&

1740 LLVM_DEBUG(dbgs() << " *** NOTE: cycle header found\n");

1741 TryBreak = true;

1742 }

1743

1744

1745 if (!TryBreak)

1746 LLVM_DEBUG(dbgs() << "Sinking along critical edge.\n");

1747 else {

1748

1749

1750

1751 bool Status = PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo,

1752 BreakPHIEdge);

1754 LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "

1755 "break critical edge\n");

1756

1757 return false;

1758 }

1759 }

1760

1761 if (BreakPHIEdge) {

1762

1763

1764

1766 PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);

1768 LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "

1769 "break critical edge\n");

1770

1771 return false;

1772 }

1773

1774

1778 LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");

1779 return false;

1780 }

1781

1782

1784 for (auto &MO : MI.all_defs()) {

1786 continue;

1787 auto It = SeenDbgUsers.find(MO.getReg());

1788 if (It == SeenDbgUsers.end())

1789 continue;

1790

1791

1792 auto &Users = It->second;

1795 if (User.getInt()) {

1796

1797

1800 } else {

1803 }

1804 }

1805 }

1806

1807

1808

1809

1810 if (MI.getMF()->getFunction().getSubprogram() && MI.isCopy())

1811 SalvageUnsunkDebugUsersOfCopy(MI, SuccToSinkTo);

1812

1813 performSink(MI, *SuccToSinkTo, InsertPos, DbgUsersToSink);

1814

1815

1816

1817

1818

1819

1821 RegsToClearKillFlags.insert(MO.getReg());

1822

1823 return true;

1824}

1825

1826void MachineSinking::SalvageUnsunkDebugUsersOfCopy(

1829 assert(MI.getOperand(1).isReg());

1830

1831

1832

1833

1837 for (auto &MO : MI.all_defs()) {

1839 continue;

1841 for (auto &User : MRI.use_instructions(MO.getReg())) {

1842 if (!User.isDebugValue() || DT->dominates(TargetBlock, User.getParent()))

1843 continue;

1844

1845

1846 if (User.getParent() == MI.getParent())

1847 continue;

1848

1850 "DBG_VALUE user of vreg, but has no operand for it?");

1852 }

1853 }

1854

1855

1856

1857 for (auto *User : DbgDefUsers) {

1858 for (auto &Reg : DbgUseRegs) {

1859 for (auto &DbgOp : User->getDebugOperandsForReg(Reg)) {

1860 DbgOp.setReg(MI.getOperand(1).getReg());

1861 DbgOp.setSubReg(MI.getOperand(1).getSubReg());

1862 }

1863 }

1864 }

1865}

1866

1867

1868

1869

1870

1871

1872

1873

1874

1875

1876

1877

1878

1879

1880

1881

1882

1883

1884

1885

1886

1887

1888

1889

1890

1891

1892

1893

1894

1895

1896

1897

1898

1899

1900

1901namespace {

1902

1904public:

1906

1907 static char ID;

1910

1914 }

1915

1918 MachineFunctionProperties::Property::NoVRegs);

1919 }

1920

1921private:

1922

1923 LiveRegUnits ModifiedRegUnits, UsedRegUnits;

1924

1925

1926

1927

1928

1930

1931

1932

1935};

1936}

1937

1938char PostRAMachineSinking::ID = 0;

1940

1942 "PostRA Machine Sink", false, false)

1943

1949}

1950

1955

1957 for (auto *SI : SinkableBBs) {

1958 if (aliasWithRegsInLiveIn(*SI, Reg, TRI)) {

1959

1960

1961 if (BB)

1962 return nullptr;

1963 BB = SI;

1964 }

1965 }

1966

1967 if (!BB)

1968 return nullptr;

1969

1970

1971 for (auto *SI : CurBB.successors()) {

1972 if (!SinkableBBs.count(SI) && aliasWithRegsInLiveIn(*SI, Reg, TRI))

1973 return nullptr;

1974 }

1975 return BB;

1976}

1977

1984 for (auto DefReg : DefedRegsInCopy) {

1987 if (!BB || (SingleBB && SingleBB != BB))

1988 return nullptr;

1989 SingleBB = BB;

1990 }

1991 return SingleBB;

1992}

1993

1998 for (auto U : UsedOpsInCopy) {

2001 if (!UsedRegUnits.available(SrcReg)) {

2004 if (UI.killsRegister(SrcReg, TRI)) {

2005 UI.clearRegisterKills(SrcReg, TRI);

2007 break;

2008 }

2009 }

2010 }

2011 }

2012}

2013

2019 for (unsigned DefReg : DefedRegsInCopy)

2020 for (MCPhysReg S : TRI->subregs_inclusive(DefReg))

2022 for (auto U : UsedOpsInCopy)

2023 SuccBB->addLiveIn(MI->getOperand(U).getReg());

2025}

2026

2032 bool HasRegDependency = false;

2033 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {

2035 if (!MO.isReg())

2036 continue;

2038 if (!Reg)

2039 continue;

2040 if (MO.isDef()) {

2042 HasRegDependency = true;

2043 break;

2044 }

2046

2047

2048

2049

2050

2051 } else if (MO.isUse()) {

2053 HasRegDependency = true;

2054 break;

2055 }

2057 }

2058 }

2059 return HasRegDependency;

2060}

2061

2062bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,

2067

2068

2069

2071 if (!SI->livein_empty() && SI->pred_size() == 1)

2072 SinkableBBs.insert(SI);

2073

2074 if (SinkableBBs.empty())

2075 return false;

2076

2077 bool Changed = false;

2078

2079

2080

2081 ModifiedRegUnits.clear();

2082 UsedRegUnits.clear();

2083 SeenDbgInstrs.clear();

2084

2086

2088

2090

2091

2092

2093 if (MI.isDebugValue() && !MI.isDebugRef()) {

2095 bool IsValid = true;

2098

2099

2101 ModifiedRegUnits, UsedRegUnits)) {

2102 IsValid = false;

2103 break;

2104 }

2105

2106

2108 MIUnits[Unit].push_back(MO.getReg());

2109 }

2110 }

2111 if (IsValid) {

2112 for (auto &RegOps : MIUnits)

2113 SeenDbgInstrs[RegOps.first].emplace_back(&MI,

2114 std::move(RegOps.second));

2115 }

2116 continue;

2117 }

2118

2119 if (MI.isDebugOrPseudoInstr())

2120 continue;

2121

2122

2123 if (MI.isCall())

2124 return false;

2125

2126 if (!MI.isCopy() || !MI.getOperand(0).isRenamable()) {

2129 continue;

2130 }

2131

2132

2134 ModifiedRegUnits, UsedRegUnits)) {

2137 continue;

2138 }

2139 assert((!UsedOpsInCopy.empty() && !DefedRegsInCopy.empty()) &&

2140 "Unexpect SrcReg or DefReg");

2143

2144

2145 if (!SuccBB) {

2148 continue;

2149 }

2151 "Unexpected predecessor");

2152

2153

2154

2155

2157 for (auto &MO : MI.all_defs()) {

2159 for (const auto &MIRegs : SeenDbgInstrs.lookup(Unit)) {

2160 auto &Regs = DbgValsToSinkMap[MIRegs.first];

2161 for (unsigned Reg : MIRegs.second)

2162 Regs.push_back(Reg);

2163 }

2164 }

2165 }

2166 auto DbgValsToSink = DbgValsToSinkMap.takeVector();

2167

2168 LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccBB);

2169

2175 LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");

2176 continue;

2177 }

2178

2179

2180

2182 performSink(MI, *SuccBB, InsertPos, DbgValsToSink);

2183 updateLiveIn(&MI, SuccBB, UsedOpsInCopy, DefedRegsInCopy);

2184

2185 Changed = true;

2186 ++NumPostRACopySink;

2187 }

2188 return Changed;

2189}

2190

2191bool PostRAMachineSinking::runOnMachineFunction(MachineFunction &MF) {

2193 return false;

2194

2195 bool Changed = false;

2198

2199 ModifiedRegUnits.init(*TRI);

2200 UsedRegUnits.init(*TRI);

2201 for (auto &BB : MF)

2202 Changed |= tryToSinkCopy(BB, MF, TRI, TII);

2203

2204 return Changed;

2205}
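// Standalone sketch of the bottom-up scan that drives post-RA copy sinking:
// ModifiedRegUnits and UsedRegUnits accumulate everything defined or read
// below the instruction currently being considered, and a COPY may only sink
// past that region if none of its registers appear in either set. Registers
// are small integers here and std::bitset stands in for LiveRegUnits; all
// names are hypothetical.
#include <bitset>
#include <cstddef>
#include <vector>
constexpr std::size_t MaxRegs = 128;
struct ToyMI { std::vector<int> Defs, Uses; bool IsCopy = false; };
static void toyScanForSinkableCopies(const std::vector<ToyMI> &Block,
                                     std::vector<std::size_t> &SinkableCopies) {
  std::bitset<MaxRegs> Modified, Used;
  for (std::size_t I = Block.size(); I-- > 0;) { // bottom-up walk
    const ToyMI &MI = Block[I];
    bool Blocked = false;
    for (int R : MI.Defs)
      Blocked |= Modified.test(R) || Used.test(R); // def clobbered or still read below
    for (int R : MI.Uses)
      Blocked |= Modified.test(R);                 // use would see a later redefinition
    if (MI.IsCopy && !Blocked)
      SinkableCopies.push_back(I);
    for (int R : MI.Defs)
      Modified.set(R); // accumulate this instruction after the check
    for (int R : MI.Uses)
      Used.set(R);
  }
}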


This file defines the DenseSet and SmallDenseSet classes.

This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.


static cl::opt< unsigned > SinkLoadInstsPerBlockThreshold("machine-sink-load-instrs-threshold", cl::desc("Do not try to find alias store for a load if there is a in-path " "block whose instruction number is higher than this threshold."), cl::init(2000), cl::Hidden)

static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB, SmallVectorImpl< unsigned > &UsedOpsInCopy, LiveRegUnits &UsedRegUnits, const TargetRegisterInfo *TRI)

static cl::opt< unsigned > SinkIntoCycleLimit("machine-sink-cycle-limit", cl::desc("The maximum number of instructions considered for cycle sinking."), cl::init(50), cl::Hidden)


static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo, MachineBasicBlock::iterator InsertPos, ArrayRef< MIRegs > DbgValuesToSink)

Sink an instruction and its associated debug instructions.

static cl::opt< bool > SplitEdges("machine-sink-split", cl::desc("Split critical edges during machine sinking"), cl::init(true), cl::Hidden)

static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI)

Return true if this machine instruction loads from global offset table or constant pool.

static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)

Return true if MI is likely to be usable as a memory operation by the implicit null check optimizatio...

static cl::opt< bool > SinkInstsIntoCycle("sink-insts-to-avoid-spills", cl::desc("Sink instructions into cycles to avoid " "register spills"), cl::init(false), cl::Hidden)

static cl::opt< unsigned > SinkLoadBlocksThreshold("machine-sink-load-blocks-threshold", cl::desc("Do not try to find alias store for a load if the block number in " "the straight line is higher than this threshold."), cl::init(20), cl::Hidden)

static bool hasRegisterDependency(MachineInstr *MI, SmallVectorImpl< unsigned > &UsedOpsInCopy, SmallVectorImpl< unsigned > &DefedRegsInCopy, LiveRegUnits &ModifiedRegUnits, LiveRegUnits &UsedRegUnits)

static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB, SmallVectorImpl< unsigned > &UsedOpsInCopy, SmallVectorImpl< unsigned > &DefedRegsInCopy)

Machine code static false bool blockPrologueInterferes(const MachineBasicBlock *BB, MachineBasicBlock::const_iterator End, const MachineInstr &MI, const TargetRegisterInfo *TRI, const TargetInstrInfo *TII, const MachineRegisterInfo *MRI)

Return true if a target defined block prologue instruction interferes with a sink candidate.

static cl::opt< unsigned > SplitEdgeProbabilityThreshold("machine-sink-split-probability-threshold", cl::desc("Percentage threshold for splitting single-instruction critical edge. " "If the branch threshold is higher than this threshold, we allow " "speculative execution of up to 1 instruction to avoid branching to " "splitted critical edge"), cl::init(40), cl::Hidden)

static bool attemptDebugCopyProp(MachineInstr &SinkInst, MachineInstr &DbgMI, Register Reg)

If the sunk instruction is a copy, try to forward the copy instead of leaving an 'undef' DBG_VALUE in...

static MachineBasicBlock * getSingleLiveInSuccBB(MachineBasicBlock &CurBB, const SmallPtrSetImpl< MachineBasicBlock * > &SinkableBBs, unsigned Reg, const TargetRegisterInfo *TRI)

static cl::opt< bool > UseBlockFreqInfo("machine-sink-bfi", cl::desc("Use block frequency info to find successors to sink"), cl::init(true), cl::Hidden)

std::pair< MachineInstr *, SmallVector< unsigned, 2 > > MIRegs

This file implements a map that provides insertion order iteration.

#define INITIALIZE_PASS_DEPENDENCY(depName)

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)

This file defines the PointerIntPair class.


This file implements a set that has insertion order iteration characteristics.

static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI, AAResults &AA)

static bool SinkInstruction(Instruction *Inst, SmallPtrSetImpl< Instruction * > &Stores, DominatorTree &DT, LoopInfo &LI, AAResults &AA)

SinkInstruction - Determine whether it is safe to sink the specified machine instruction out of its c...

This file defines the SmallSet class.

This file defines the SmallVector class.

This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...

#define STATISTIC(VARNAME, DESC)

Target-Independent Code Generator Pass Configuration Options pass.

A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.

Represent the analysis usage information of a pass.

AnalysisUsage & addRequired()

AnalysisUsage & addPreserved()

Add the specified Pass class to the set of analyses preserved by this pass.

void setPreservesCFG()

This function should be called by the pass, iff they do not:

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)

When two instructions are combined into a single instruction we also need to combine the original loc...

Identifies a unique instance of a variable.

iterator find(const_arg_type_t< KeyT > Val)

std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)

std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)

Implements a dense probed hash-table based set.

Base class for the actual dominator tree node.

iterator_range< iterator > children()

bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const

dominates - Returns true iff A dominates B.

DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const

getNode - return the (Post)DominatorTree node for the specified basic block.

bool isReachableFromEntry(const Use &U) const

Provide an overload for a Use.

Instruction * findNearestCommonDominator(Instruction *I1, Instruction *I2) const

Find the nearest instruction I that dominates both I1 and I2, in the sense that a result produced bef...

bool dominates(const BasicBlock *BB, const Use &U) const

Return true if the (end of the) basic block BB dominates the use U.

iterator_range< const_toplevel_iterator > toplevel_cycles() const

void splitCriticalEdge(BlockT *Pred, BlockT *Succ, BlockT *New)

unsigned getCycleDepth(const BlockT *Block) const

get the depth for the cycle which containing a given block.

CycleT * getCycle(const BlockT *Block) const

Find the innermost cycle containing a given block.

A possibly irreducible generalization of a Loop.

BlockT * getHeader() const

bool isReducible() const

Whether the cycle is a natural loop.

BlockT * getCyclePreheader() const

Return the preheader block for this cycle.

bool contains(const BlockT *Block) const

Return whether Block is contained in the cycle.

bool isAsCheapAsAMove(const MachineInstr &MI) const override

bool shouldSink(const MachineInstr &MI) const override

A set of register units used to track register liveness.

static void accumulateUsedDefed(const MachineInstr &MI, LiveRegUnits &ModifiedRegUnits, LiveRegUnits &UsedRegUnits, const TargetRegisterInfo *TRI)

For a machine instruction MI, adds all register units used in UsedRegUnits and defined or clobbered i...

bool available(MCPhysReg Reg) const

Returns true if no part of physical register Reg is live.

void addLiveIns(const MachineBasicBlock &MBB)

Adds registers living into block MBB.

bool isInlineAsmBrIndirectTarget() const

Returns true if this is the indirect dest of an INLINEASM_BR.

unsigned pred_size() const

bool isEHPad() const

Returns true if the block is a landing pad.

instr_iterator instr_begin()

instr_iterator insert(instr_iterator I, MachineInstr *M)

Insert MI into the instruction list before I, possibly inside a bundle.

iterator SkipPHIsAndLabels(iterator I)

Return the first instruction in MBB after I that is not a PHI or a label.

void removeLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll())

Remove the specified register from the live in set.

unsigned succ_size() const

void sortUniqueLiveIns()

Sorts and uniques the LiveIns vector.

pred_iterator pred_begin()

instr_iterator instr_end()

void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())

Adds the specified register as a live in.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

iterator_range< succ_iterator > successors()

bool isSuccessor(const MachineBasicBlock *MBB) const

Return true if the specified MBB is a successor of this block.

iterator_range< pred_iterator > predecessors()

void splice(iterator Where, MachineBasicBlock *Other, iterator From)

Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...

bool sizeWithoutDebugLargerThan(unsigned Limit) const

bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const

Return true if the specified register is in the live in set.

MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...

BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const

getblockFreq - Return block frequency.

void onEdgeSplit(const MachineBasicBlock &NewPredecessor, const MachineBasicBlock &NewSuccessor, const MachineBranchProbabilityInfo &MBPI)

incrementally calculate block frequencies when we split edges, to avoid full CFG traversal.

BranchProbability getEdgeProbability(const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const

Legacy analysis pass which computes a MachineCycleInfo.

Analysis pass which computes a MachineDominatorTree.

DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...

MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.

virtual bool runOnMachineFunction(MachineFunction &MF)=0

runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...

virtual MachineFunctionProperties getRequiredProperties() const

Properties which a MachineFunction may have at a given point in time.

MachineFunctionProperties & set(Property P)

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

Function & getFunction()

Return the LLVM function that this machine code represents.

MachineInstr * CloneMachineInstr(const MachineInstr *Orig)

Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...

Representation of each machine instruction.

static iterator_range< filter_iterator< Operand *, std::function< bool(Operand &Op)> > > getDebugOperandsForReg(Instruction *MI, Register Reg)

Returns a range of all of the operands that correspond to a debug use of Reg.

bool hasDebugOperandForReg(Register Reg) const

Returns whether this debug value has at least one debug operand with the register Reg.

void setDebugValueUndef()

Sets all register debug operands in this debug value instruction to be undef.

bool mayLoadOrStore(QueryType Type=AnyInBundle) const

Return true if this instruction could possibly read or modify memory.

const MachineBasicBlock * getParent() const

bool isCopyLike() const

Return true if the instruction behaves like a copy.

const MachineFunction * getMF() const

Return the function that contains the basic block that this instruction belongs to.

const MachineOperand & getOperand(unsigned i) const

A description of a memory reference used in the backend.

MachineOperand class - Representation of each machine instruction operand.

unsigned getSubReg() const

bool readsReg() const

readsReg - Returns true if this operand reads the previous value of its register.

bool isReg() const

isReg - Tests if this is a MO_Register operand.

MachineBasicBlock * getMBB() const

void setIsKill(bool Val=true)

MachineInstr * getParent()

getParent - Return the instruction that this operand belongs to.

Register getReg() const

getReg - Returns the register number.

MachinePostDominatorTree - an analysis pass wrapper for DominatorTree used to compute the post-domina...

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

This class implements a map that also provides access to all stored values in a deterministic order.

VectorType takeVector()

Clear the MapVector and return the underlying vector.

static PassRegistry * getPassRegistry()

getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...

virtual StringRef getPassName() const

getPassName - Return a nice clean name for a pass.

virtual void releaseMemory()

releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...

PointerIntPair - This class implements a pair of a pointer and small integer.

An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.

Analysis providing profile information.

Special value supplied for machine level alias analysis.

Track the current register pressure at some position in the instruction stream, and remember the high...

unsigned getRegPressureSetLimit(unsigned Idx) const

Get the register unit limit for the given pressure set index.

void runOnMachineFunction(const MachineFunction &MF)

runOnFunction - Prepare to answer questions about MF.

List of registers defined and used by a machine instruction.

void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, bool TrackLaneMasks, bool IgnoreDead)

Analyze the given instruction MI and fill in the Uses, Defs and DeadDefs list based on the MachineOpe...

Wrapper class representing virtual and physical registers.

constexpr bool isVirtual() const

Return true if the specified register number is in the virtual register namespace.

constexpr bool isPhysical() const

Return true if the specified register number is in the physical register namespace.

A vector that has set insertion semantics.

void clear()

Completely clear the SetVector.

A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...

std::pair< const_iterator, bool > insert(const T &V)

insert - Insert an element into the set if it isn't already there.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

reference emplace_back(ArgTypes &&... Args)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

StringRef - Represent a constant reference to a string, i.e.

TargetInstrInfo - Interface to description of machine instruction set.

Target-Independent Code Generator Pass Configuration Options.

bool getEnableSinkAndFold() const

bool contains(Register Reg) const

Return true if the specified register is included in this register class.

bool hasSuperClassEq(const TargetRegisterClass *RC) const

Returns true if RC is a super-class of or equal to this class.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

TargetSubtargetInfo - Generic base class for all target subtargets.

virtual const TargetRegisterInfo * getRegisterInfo() const

getRegisterInfo - If register information is available, return it.

virtual const TargetInstrInfo * getInstrInfo() const

std::pair< iterator, bool > insert(const ValueT &V)

bool contains(const_arg_type_t< ValueT > V) const

Check if the set contains the given element.

size_type count(const_arg_type_t< ValueT > V) const

Return 1 if the specified key is in the set, 0 otherwise.

unsigned ID

LLVM IR allows to use arbitrary numbers as calling convention identifiers.

initializer< Ty > init(const Ty &Val)

This is an optimization pass for GlobalISel generic memory operations.

void stable_sort(R &&Range)

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

bool isCycleInvariant(const MachineCycle *Cycle, MachineInstr &I)

char & MachineSinkingID

MachineSinking - This pass performs sinking on machine instructions.

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)

Returns true if machine function MF is suggested to be size-optimized based on the profile.

iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)

Make a range that does early increment to allow mutation of the underlying range without disrupting i...

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

auto reverse(ContainerTy &&C)

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

char & PostRAMachineSinkingID

This pass perform post-ra machine sink for COPY instructions.

void initializeMachineSinkingPass(PassRegistry &)

iterator_range< df_iterator< T > > depth_first(const T &G)

Printable printMBBReference(const MachineBasicBlock &MBB)

Prints a machine basic block reference.

TODO: Might pack better if we changed this to a Struct of Arrays, since MachineOperand is width 32,...

Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.

RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.

Represents a predicate at the MachineFunction level.