LLVM: lib/CodeGen/MachineCombiner.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

34

35using namespace llvm;

36

37#define DEBUG_TYPE "machine-combiner"

38

39STATISTIC(NumInstCombined, "Number of machineinst combined");

40

43 cl::desc("Incremental depth computation will be used for basic "

44 "blocks with more instructions."), cl::init(500));

45

47 cl::desc("Dump all substituted intrs"),

49

50#ifdef EXPENSIVE_CHECKS

52 "machine-combiner-verify-pattern-order", cl::Hidden,

54 "Verify that the generated patterns are ordered by increasing latency"),

56#else

58 "machine-combiner-verify-pattern-order", cl::Hidden,

60 "Verify that the generated patterns are ordered by increasing latency"),

62#endif

63

64namespace {

71 MachineLoopInfo *MLI = nullptr;

77

79

80public:

81 static char ID;

84 }

85 void getAnalysisUsage(AnalysisUsage &AU) const override;

86 bool runOnMachineFunction(MachineFunction &MF) override;

87 StringRef getPassName() const override { return "Machine InstCombiner"; }

88

89private:

90 bool combineInstructions(MachineBasicBlock *);

91 MachineInstr *getOperandDef(const MachineOperand &MO);

92 bool isTransientMI(const MachineInstr *MI);

93 unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,

94 DenseMap<Register, unsigned> &InstrIdxForVirtReg,

96 const MachineBasicBlock &MBB);

97 unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,

99 bool improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,

101 SmallVectorImpl<MachineInstr *> &InsInstrs,

102 SmallVectorImpl<MachineInstr *> &DelInstrs,

103 DenseMap<Register, unsigned> &InstrIdxForVirtReg,

104 unsigned Pattern, bool SlackIsAccurate);

105 bool reduceRegisterPressure(MachineInstr &Root, MachineBasicBlock *MBB,

106 SmallVectorImpl<MachineInstr *> &InsInstrs,

107 SmallVectorImpl<MachineInstr *> &DelInstrs,

108 unsigned Pattern);

109 bool preservesResourceLen(MachineBasicBlock *MBB,

111 SmallVectorImpl<MachineInstr *> &InsInstrs,

112 SmallVectorImpl<MachineInstr *> &DelInstrs);

113 void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,

114 SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);

115 std::pair<unsigned, unsigned>

116 getLatenciesForInstrSequences(MachineInstr &MI,

117 SmallVectorImpl<MachineInstr *> &InsInstrs,

118 SmallVectorImpl<MachineInstr *> &DelInstrs,

120

121 void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,

122 SmallVector<unsigned, 16> &Patterns);

124};

125}

126

127char MachineCombiner::ID = 0;

129

131 "Machine InstCombiner", false, false)

136

137void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {

138 AU.setPreservesCFG();

147}

148

150MachineCombiner::getOperandDef(const MachineOperand &MO) {

151 MachineInstr *DefInstr = nullptr;

152

154 DefInstr = MRI->getUniqueVRegDef(MO.getReg());

155 return DefInstr;

156}

157

158

159bool MachineCombiner::isTransientMI(const MachineInstr *MI) {

160 if (MI->isCopy())

161 return MI->isTransient();

162

163

164 Register Dst = MI->getOperand(0).getReg();

165 Register Src = MI->getOperand(1).getReg();

166

167 if (MI->isFullCopy()) {

168

169 if (MI->getOperand(0).getSubReg() || Src.isPhysical() || Dst.isPhysical())

170 return false;

171

172 auto SrcSub = MI->getOperand(1).getSubReg();

173 auto SrcRC = MRI->getRegClass(Src);

174 auto DstRC = MRI->getRegClass(Dst);

175 return TRI->getMatchingSuperRegClass(SrcRC, DstRC, SrcSub) != nullptr;

176 }

177

178 if (Src.isPhysical() && Dst.isPhysical())

179 return Src == Dst;

180

181 if (Src.isVirtual() && Dst.isVirtual()) {

182 auto SrcRC = MRI->getRegClass(Src);

183 auto DstRC = MRI->getRegClass(Dst);

184 return SrcRC->hasSuperClassEq(DstRC) || SrcRC->hasSubClassEq(DstRC);

185 }

186

187 if (Src.isVirtual())

189

190

191 auto DstRC = MRI->getRegClass(Dst);

192 return DstRC->contains(Src);

193}

194

195

196

197

198

199

200

201

202

203unsigned

204MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,

205 DenseMap<Register, unsigned> &InstrIdxForVirtReg,

207 const MachineBasicBlock &MBB) {

208 SmallVector<unsigned, 16> InstrDepth;

209

210

211

212 for (auto *InstrPtr : InsInstrs) {

213 unsigned IDepth = 0;

214 for (const MachineOperand &MO : InstrPtr->all_uses()) {

215

217 continue;

218 unsigned DepthOp = 0;

219 unsigned LatencyOp = 0;

220 DenseMap<Register, unsigned>::iterator II =

221 InstrIdxForVirtReg.find(MO.getReg());

222 if (II != InstrIdxForVirtReg.end()) {

223

224 assert(II->second < InstrDepth.size() && "Bad Index");

225 MachineInstr *DefInstr = InsInstrs[II->second];

227 "There must be a definition for a new virtual register");

228 DepthOp = InstrDepth[II->second];

229 int DefIdx =

231 int UseIdx =

232 InstrPtr->findRegisterUseOperandIdx(MO.getReg(), nullptr);

234 InstrPtr, UseIdx);

235 } else {

236 MachineInstr *DefInstr = getOperandDef(MO);

238 MachineTraceStrategy::TS_Local ||

241 if (!isTransientMI(DefInstr))

243 DefInstr,

245 nullptr),

246 InstrPtr,

247 InstrPtr->findRegisterUseOperandIdx(MO.getReg(),

248 nullptr));

249 }

250 }

251 IDepth = std::max(IDepth, DepthOp + LatencyOp);

252 }

254 }

255 unsigned NewRootIdx = InsInstrs.size() - 1;

256 return InstrDepth[NewRootIdx];

257}

258

259

260

261

262

263

264

265

266

267

268unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,

270

271 unsigned NewRootLatency = 0;

272

273 for (const MachineOperand &MO : NewRoot->all_defs()) {

274

276 continue;

277

279 RI++;

280 if (RI == MRI->reg_end())

281 continue;

282 MachineInstr *UseMO = RI->getParent();

283 unsigned LatencyOp = 0;

284 if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {

286 NewRoot,

288 UseMO,

290 } else {

291 LatencyOp = TSchedModel.computeInstrLatency(NewRoot);

292 }

293 NewRootLatency = std::max(NewRootLatency, LatencyOp);

294 }

295 return NewRootLatency;

296}

297

298CombinerObjective MachineCombiner::getCombinerObjective(unsigned Pattern) {

299

300

301 switch (Pattern) {

302 case MachineCombinerPattern::REASSOC_AX_BY:

303 case MachineCombinerPattern::REASSOC_AX_YB:

304 case MachineCombinerPattern::REASSOC_XA_BY:

305 case MachineCombinerPattern::REASSOC_XA_YB:

306 return CombinerObjective::MustReduceDepth;

307 default:

309 }

310}

311

312

313

314

315

316std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(

317 MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,

318 SmallVectorImpl<MachineInstr *> &DelInstrs,

320 assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");

321 unsigned NewRootLatency = 0;

322

323 MachineInstr *NewRoot = InsInstrs.back();

324 for (unsigned i = 0; i < InsInstrs.size() - 1; i++)

325 NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);

326 NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

327

328 unsigned RootLatency = 0;

329 for (auto *I : DelInstrs)

330 RootLatency += TSchedModel.computeInstrLatency(I);

331

332 return {NewRootLatency, RootLatency};

333}

334

335bool MachineCombiner::reduceRegisterPressure(

336 MachineInstr &Root, MachineBasicBlock *MBB,

337 SmallVectorImpl<MachineInstr *> &InsInstrs,

338 SmallVectorImpl<MachineInstr *> &DelInstrs, unsigned Pattern) {

339

340

341

342

343 return true;

344}

345

346

347

348

349

350

351bool MachineCombiner::improvesCriticalPathLen(

352 MachineBasicBlock *MBB, MachineInstr *Root,

354 SmallVectorImpl<MachineInstr *> &InsInstrs,

355 SmallVectorImpl<MachineInstr *> &DelInstrs,

356 DenseMap<Register, unsigned> &InstrIdxForVirtReg, unsigned Pattern,

357 bool SlackIsAccurate) {

358

359 unsigned NewRootDepth =

360 getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace, *MBB);

362

363 LLVM_DEBUG(dbgs() << " Dependence data for " << *Root << "\tNewRootDepth: "

364 << NewRootDepth << "\tRootDepth: " << RootDepth);

365

366

367

368

369

370

371 if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {

374 ? dbgs() << "\t and it does it\n"

375 : dbgs() << "\t but it does NOT do it\n");

376 return NewRootDepth < RootDepth;

377 }

378

379

380

381

382

383

384 unsigned NewRootLatency, RootLatency;

386 std::tie(NewRootLatency, RootLatency) =

387 getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);

388 } else {

389 NewRootLatency = TSchedModel.computeInstrLatency(InsInstrs.back());

390 RootLatency = TSchedModel.computeInstrLatency(Root);

391 }

392

393 unsigned RootSlack = BlockTrace.getInstrSlack(*Root);

394 unsigned NewCycleCount = NewRootDepth + NewRootLatency;

395 unsigned OldCycleCount =

396 RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);

397 LLVM_DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency

398 << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "

399 << RootSlack << " SlackIsAccurate=" << SlackIsAccurate

400 << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount

401 << "\n\tRootDepth + RootLatency + RootSlack = "

402 << OldCycleCount);

403 LLVM_DEBUG(NewCycleCount <= OldCycleCount

404 ? dbgs() << "\n\t It IMPROVES PathLen because"

405 : dbgs() << "\n\t It DOES NOT improve PathLen because");

406 LLVM_DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount

407 << ", OldCycleCount = " << OldCycleCount << "\n");

408

409 return NewCycleCount <= OldCycleCount;

410}

411

412

413void MachineCombiner::instr2instrSC(

414 SmallVectorImpl<MachineInstr *> &Instrs,

415 SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {

416 for (auto *InstrPtr : Instrs) {

417 unsigned Opc = InstrPtr->getOpcode();

421 }

422}

423

424

425bool MachineCombiner::preservesResourceLen(

427 SmallVectorImpl<MachineInstr *> &InsInstrs,

428 SmallVectorImpl<MachineInstr *> &DelInstrs) {

430 return true;

431

432

433

434

437 unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

438

439

442

443 instr2instrSC(InsInstrs, InsInstrsSC);

444 instr2instrSC(DelInstrs, DelInstrsSC);

445

448

449

450 unsigned ResLenAfterCombine =

452

453 LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: "

454 << ResLenBeforeCombine

455 << " and after: " << ResLenAfterCombine << "\n");

457 ResLenAfterCombine <=

459 ? dbgs() << "\t\t As result it IMPROVES/PRESERVES Resource Length\n"

460 : dbgs() << "\t\t As result it DOES NOT improve/preserve Resource "

461 "Length\n");

462

463 return ResLenAfterCombine <=

465}

466

467

468

469

470

471

472

473

474

475

476

477

478

479

480static void

486 unsigned Pattern, bool IncrementalUpdate) {

487

488

489

490

491

492

493

494 TII->finalizeInsInstrs(MI, Pattern, InsInstrs);

495

496 for (auto *InstrPtr : InsInstrs)

498

499 for (auto *InstrPtr : DelInstrs) {

500 InstrPtr->eraseFromParent();

501

502 for (auto *I = RegUnits.begin(); I != RegUnits.end();) {

503 if (I->MI == InstrPtr)

505 else

506 I++;

507 }

508 }

509

510 if (IncrementalUpdate)

511 for (auto *InstrPtr : InsInstrs)

512 TraceEnsemble->updateDepth(MBB, *InstrPtr, RegUnits);

513 else

515

516 NumInstCombined++;

517}

518

519

520

521void MachineCombiner::verifyPatternOrder(MachineBasicBlock *MBB,

522 MachineInstr &Root,

523 SmallVector<unsigned, 16> &Patterns) {

524 long PrevLatencyDiff = std::numeric_limits::max();

525 (void)PrevLatencyDiff;

526 for (auto P : Patterns) {

529 DenseMap<Register, unsigned> InstrIdxForVirtReg;

531 InstrIdxForVirtReg);

532

533

534

536 continue;

537

538 unsigned NewRootLatency, RootLatency;

539 std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(

540 Root, InsInstrs, DelInstrs, TraceEnsemble->getTrace(MBB));

541 long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);

542 assert(CurrentLatencyDiff <= PrevLatencyDiff &&

543 "Current pattern is better than previous pattern.");

544 PrevLatencyDiff = CurrentLatencyDiff;

545 }

546}

547

548

549

550

551

552

553

554

555bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {

558

559 bool IncrementalUpdate = false;

560 auto BlockIter = MBB->begin();

561 decltype(BlockIter) LastUpdate;

562

564 if (!TraceEnsemble)

566

569

571

572 bool DoRegPressureReduce =

574

575 while (BlockIter != MBB->end()) {

576 auto &MI = *BlockIter++;

577 SmallVector<unsigned, 16> Patterns;

578

579

580

581

582

583

584

585

586

587

588

589

590

591

592

593

594

595

596

597

598

599

600

601

602

603

604

606 continue;

607

609 verifyPatternOrder(MBB, MI, Patterns);

610

611 for (const auto P : Patterns) {

614 DenseMap<Register, unsigned> InstrIdxForVirtReg;

616 InstrIdxForVirtReg);

617

618

619

620 if (InsInstrs.empty())

621 continue;

622

624 dbgs() << "\tFor the Pattern (" << (int)P

625 << ") these instructions could be removed\n";

626 for (auto const *InstrPtr : DelInstrs)

627 InstrPtr->print(dbgs(), false, false,

628 false, true, TII);

629 dbgs() << "\tThese instructions could replace the removed ones\n";

630 for (auto const *InstrPtr : InsInstrs)

631 InstrPtr->print(dbgs(), false, false,

632 false, true, TII);

633 });

634

635 if (IncrementalUpdate && LastUpdate != BlockIter) {

636

637 TraceEnsemble->updateDepths(LastUpdate, BlockIter, RegUnits);

638 LastUpdate = BlockIter;

639 }

640

641 if (DoRegPressureReduce &&

642 getCombinerObjective(P) ==

643 CombinerObjective::MustReduceRegisterPressure) {

645

646 IncrementalUpdate = true;

647 LastUpdate = BlockIter;

648 }

649 if (reduceRegisterPressure(MI, MBB, InsInstrs, DelInstrs, P)) {

650

652 RegUnits, TII, P, IncrementalUpdate);

654

655

656

657 BlockIter--;

658 break;

659 }

660 }

661

663 LLVM_DEBUG(dbgs() << "\t Replacing due to throughput pattern in loop\n");

665 RegUnits, TII, P, IncrementalUpdate);

666

668 break;

669 } else if (OptForSize && InsInstrs.size() < DelInstrs.size()) {

670 LLVM_DEBUG(dbgs() << "\t Replacing due to OptForSize ("

671 << InsInstrs.size() << " < "

672 << DelInstrs.size() << ")\n");

674 RegUnits, TII, P, IncrementalUpdate);

675

677 break;

678 } else {

679

680

681

682

683

686 if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,

687 InstrIdxForVirtReg, P,

688 !IncrementalUpdate) &&

689 preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {

691

692 IncrementalUpdate = true;

693 LastUpdate = BlockIter;

694 }

695

697 RegUnits, TII, P, IncrementalUpdate);

698

699

701 break;

702 }

703

704

706 for (auto *InstrPtr : InsInstrs)

707 MF->deleteMachineInstr(InstrPtr);

708 }

709 InstrIdxForVirtReg.clear();

710 }

711 }

712

713 if (Changed && IncrementalUpdate)

716}

717

718bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {

723 TSchedModel.init(STI);

725 MLI = &getAnalysis().getLI();

726 Traces = &getAnalysis().getMTM();

727 PSI = &getAnalysis().getPSI();

729 &getAnalysis().getBFI() :

730 nullptr;

731 TraceEnsemble = nullptr;

733

738 << " Skipping pass: Target does not support machine combiner\n");

739 return false;

740 }

741

743

744

745 for (auto &MBB : MF)

746 Changed |= combineInstructions(&MBB);

747

749}

unsigned const MachineRegisterInfo * MRI

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

const TargetInstrInfo & TII

This file defines the DenseMap class.

===- LazyMachineBlockFrequencyInfo.h - Lazy Block Frequency -*- C++ -*-===//

static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, MachineTraceMetrics::Ensemble *TraceEnsemble, LiveRegUnitSet &RegUnits, const TargetInstrInfo *TII, unsigned Pattern, bool IncrementalUpdate)

Inserts InsInstrs and deletes DelInstrs.

Definition MachineCombiner.cpp:481

static cl::opt< bool > VerifyPatternOrder("machine-combiner-verify-pattern-order", cl::Hidden, cl::desc("Verify that the generated patterns are ordered by increasing latency"), cl::init(false))

static cl::opt< unsigned > inc_threshold("machine-combiner-inc-threshold", cl::Hidden, cl::desc("Incremental depth computation will be used for basic " "blocks with more instructions."), cl::init(500))

static cl::opt< bool > dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden, cl::desc("Dump all substituted intrs"), cl::init(false))

Register const TargetRegisterInfo * TRI

Promote Memory to Register

uint64_t IntrinsicInst * II

#define INITIALIZE_PASS_DEPENDENCY(depName)

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)

This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...

#define STATISTIC(VARNAME, DESC)

Represent the analysis usage information of a pass.

iterator find(const_arg_type_t< KeyT > Val)

This is an alternative analysis pass to MachineBlockFrequencyInfo.

LoopT * getLoopFor(const BlockT *BB) const

Return the inner most loop that BB lives in.

unsigned getSchedClass() const

Return the scheduling class for this instruction.

const MCInstrDesc & get(unsigned Opcode) const

Return the machine instruction descriptor that corresponds to the specified instruction opcode.

const MCSchedModel & getSchedModel() const

Get the machine model for this subtarget's CPU.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

MachineInstrBundleIterator< MachineInstr > iterator

LLVM_ABI StringRef getName() const

Return the name of the corresponding LLVM basic block, or an empty string.

MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...

Analysis pass which computes a MachineDominatorTree.

MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

StringRef getName() const

getName - Return the name of the corresponding LLVM function.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

Representation of each machine instruction.

const MachineBasicBlock * getParent() const

filtered_mop_range all_defs()

Returns an iterator range over all operands that are (explicit or implicit) register defs.

LLVM_ABI int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const

Returns the operand index that is a use of the specific register or -1 if it is not found.

LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const

Returns the operand index that is a def of the specified register or -1 if it is not found.

MachineOperand class - Representation of each machine instruction operand.

bool isReg() const

isReg - Tests if this is a MO_Register operand.

MachineInstr * getParent()

getParent - Return the instruction that this operand belongs to.

Register getReg() const

getReg - Returns the register number.

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

defusechain_iterator< true, true, false, true, false > reg_iterator

reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified register.

A trace ensemble is a collection of traces selected using the same strategy, for example 'minimum res...

void invalidate(const MachineBasicBlock *MBB)

Invalidate traces through BadMBB.

void updateDepth(TraceBlockInfo &TBI, const MachineInstr &, LiveRegUnitSet &RegUnits)

Updates the depth of an machine instruction, given RegUnits.

void updateDepths(MachineBasicBlock::iterator Start, MachineBasicBlock::iterator End, LiveRegUnitSet &RegUnits)

Updates the depth of the instructions from Start to End.

Trace getTrace(const MachineBasicBlock *MBB)

Get the trace that passes through MBB.

unsigned getResourceLength(ArrayRef< const MachineBasicBlock * > Extrablocks={}, ArrayRef< const MCSchedClassDesc * > ExtraInstrs={}, ArrayRef< const MCSchedClassDesc * > RemoveInstrs={}) const

Return the resource length of the trace.

InstrCycles getInstrCycles(const MachineInstr &MI) const

Return the depth and height of MI.

unsigned getInstrSlack(const MachineInstr &MI) const

Return the slack of MI.

bool isDepInTrace(const MachineInstr &DefMI, const MachineInstr &UseMI) const

A dependence is useful if the basic block of the defining instruction is part of the trace of the use...

Ensemble * getEnsemble(MachineTraceStrategy)

Get the trace ensemble representing the given trace selection strategy.

void verifyAnalysis() const

void invalidate(const MachineBasicBlock *MBB)

Invalidate cached information about MBB.

static LLVM_ABI PassRegistry * getPassRegistry()

getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...

An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.

Analysis providing profile information.

bool hasProfileSummary() const

Returns true if profile summary is available.

LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)

runOnFunction - Prepare to answer questions about MF.

constexpr bool isVirtual() const

Return true if the specified register number is in the virtual register namespace.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

iterator erase(iterator I)

erase - Erases an existing element identified by a valid iterator.

const_iterator begin() const

const_iterator end() const

void setUniverse(unsigned U)

setUniverse - Set the universe size which determines the largest key the set can hold.

TargetInstrInfo - Interface to description of machine instruction set.

virtual bool isThroughputPattern(unsigned Pattern) const

Return true when a code sequence can improve throughput.

virtual bool shouldReduceRegisterPressure(const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const

Return true if target supports reassociation of instructions in machine combiner pass to reduce regis...

virtual bool useMachineCombiner() const

Return true when a target supports MachineCombiner.

virtual int getExtendResourceLenLimit() const

The limit on resource length extension we accept in MachineCombiner Pass.

virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const

When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...

virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const

Return true when there is potentially a faster code sequence for an instruction chain ending in Root.

virtual bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const

When calculate the latency of the root instruction, accumulate the latency of the sequence to the roo...

virtual CombinerObjective getCombinerObjective(unsigned Pattern) const

Return the objective of a combiner pattern.

virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const

Return a strategy that MachineCombiner must use when creating traces.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

Provide an instruction scheduling machine model to CodeGen passes.

LLVM_ABI bool hasInstrSchedModel() const

Return true if this machine model includes an instruction-level scheduling model.

LLVM_ABI void init(const TargetSubtargetInfo *TSInfo, bool EnableSModel=true, bool EnableSItins=true)

Initialize the machine model for instruction scheduling.

LLVM_ABI unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const

Compute operand latency based on the available machine model.

bool hasInstrSchedModelOrItineraries() const

Return true if this machine model includes an instruction-level scheduling model or cycle-to-cycle it...

TargetSubtargetInfo - Generic base class for all target subtargets.

virtual const TargetInstrInfo * getInstrInfo() const

virtual const TargetRegisterInfo * getRegisterInfo() const =0

Return the target's register information.

unsigned ID

LLVM IR allows the use of arbitrary numbers as calling convention identifiers.

initializer< Ty > init(const Ty &Val)

This is an optimization pass for GlobalISel generic memory operations.

LLVM_ABI void initializeMachineCombinerPass(PassRegistry &)

LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)

Returns true if machine function MF is suggested to be size-optimized based on the profile.

LLVM_ABI char & MachineCombinerID

This pass performs instruction combining using trace metrics to estimate critical-path and resource d...

Definition MachineCombiner.cpp:128

LLVM_ABI raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

CombinerObjective

The combiner's goal may differ based on which pattern it is attempting to optimize.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

SparseSet< LiveRegUnit, MCRegUnit, MCRegUnitToIndex > LiveRegUnitSet

ArrayRef(const T &OneElt) -> ArrayRef< T >

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

Machine model for scheduling, bundling, and heuristics.

const MCSchedClassDesc * getSchedClassDesc(unsigned SchedClassIdx) const

unsigned Depth

Earliest issue cycle as determined by data dependencies and instruction latencies from the beginning ...