LLVM: lib/CodeGen/GlobalISel/LoadStoreOpt.cpp Source File


#include

#define DEBUG_TYPE "loadstore-opt"

using namespace llvm;
using namespace ore;
using namespace MIPatternMatch;

STATISTIC(NumStoresMerged, "Number of stores merged");

                    false, false)

  this->MF = &MF;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  Builder.setMF(MF);
  InstsToErase.clear();
}

}

    Info.setOffset(0);
  }
  Info.setBase(BaseReg);
  if (RHSCst)
    Info.setOffset(RHSCst->Value.getSExtValue());

  Info.setIndex(PtrAddRHS);
}

                                              bool &IsAlias,
  auto *LdSt1 = dyn_cast<GLoadStore>(&MI1);
  auto *LdSt2 = dyn_cast<GLoadStore>(&MI2);
  if (!LdSt1 || !LdSt2)
    return false;

    return false;

  int64_t PtrDiff;

      IsAlias = !((int64_t)Size1.getValue() <= PtrDiff);
      return true;
    }

      IsAlias = !((PtrDiff + (int64_t)Size2.getValue()) <= 0);
      return true;
    }
    return false;
  }

  if (!Base0Def || !Base1Def)
    return false;

  if (Base0Def->getOpcode() != Base1Def->getOpcode())
    return false;

  if (Base0Def->getOpcode() == TargetOpcode::G_FRAME_INDEX) {

    if (Base0Def != Base1Def &&
      IsAlias = false;
      return true;
    }
  }

  if (Base0Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
    auto GV0 = Base0Def->getOperand(1).getGlobal();
    auto GV1 = Base1Def->getOperand(1).getGlobal();
    if (GV0 != GV1) {
      IsAlias = false;
      return true;
    }
  }

  return false;
}

  struct MemUseCharacteristics {
    bool IsVolatile;
    bool IsAtomic;
  };

  auto getCharacteristics =
      [&](const MachineInstr *MI) -> MemUseCharacteristics {
    if (const auto *LS = dyn_cast<GLoadStore>(MI)) {

        BaseReg = LS->getPointerReg();
      }

      return {LS->isVolatile(), LS->isAtomic(), BaseReg,
              Offset, Size, &LS->getMMO()};
    }

    return {false,
            false,
            (int64_t)0,
  };
  MemUseCharacteristics MUC0 = getCharacteristics(&MI),
                        MUC1 = getCharacteristics(&Other);

  if (MUC0.BasePtr.isValid() && MUC0.BasePtr == MUC1.BasePtr &&
      MUC0.Offset == MUC1.Offset)
    return true;

  if (MUC0.IsVolatile && MUC1.IsVolatile)
    return true;

  if (MUC0.IsAtomic && MUC1.IsAtomic)
    return true;

  if (MUC0.MMO && MUC1.MMO) {
    if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
        (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
      return false;
  }

  if ((MUC0.NumBytes.isScalable() && MUC0.Offset != 0) ||
      (MUC1.NumBytes.isScalable() && MUC1.Offset != 0))
    return true;

  const bool BothNotScalable =
      !MUC0.NumBytes.isScalable() && !MUC1.NumBytes.isScalable();

  bool IsAlias;
  if (BothNotScalable &&
    return IsAlias;

  if (!MUC0.MMO || !MUC1.MMO)
    return true;

  int64_t SrcValOffset0 = MUC0.MMO->getOffset();
  int64_t SrcValOffset1 = MUC1.MMO->getOffset();
  if (AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() && Size0.hasValue() &&

    int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
    int64_t Overlap0 =
    int64_t Overlap1 =

        MemoryLocation(MUC0.MMO->getValue(), Loc0, MUC0.MMO->getAAInfo()),
        MemoryLocation(MUC1.MMO->getValue(), Loc1, MUC1.MMO->getAAInfo())))
      return false;
  }

  return true;
}

  return MI.hasUnmodeledSideEffects() || MI.hasOrderedMemoryRef();
}

  assert(StoresToMerge.size() > 1 && "Expected multiple stores to merge");
  LLT OrigTy = MRI->getType(StoresToMerge[0]->getValueReg());
  LLT PtrTy = MRI->getType(StoresToMerge[0]->getPointerReg());

  initializeStoreMergeTargetInfo(AS);
  const auto &LegalSizes = LegalStoreSizes[AS];

#ifndef NDEBUG
  for (auto *StoreMI : StoresToMerge)
    assert(MRI->getType(StoreMI->getValueReg()) == OrigTy);
#endif

  bool AnyMerged = false;
  do {
    unsigned NumPow2 = llvm::bit_floor(StoresToMerge.size());

    unsigned MergeSizeBits;
    for (MergeSizeBits = MaxSizeBits; MergeSizeBits > 1; MergeSizeBits /= 2) {
      EVT StoreEVT =
      if (LegalSizes.size() > MergeSizeBits && LegalSizes[MergeSizeBits] &&
        break;
    }
      return AnyMerged;

    unsigned NumStoresToMerge = MergeSizeBits / OrigTy.getSizeInBits();

        StoresToMerge.begin(), StoresToMerge.begin() + NumStoresToMerge);
    AnyMerged |= doSingleStoreMerge(SingleMergeStores);
    StoresToMerge.erase(StoresToMerge.begin(),
                        StoresToMerge.begin() + NumStoresToMerge);
  } while (StoresToMerge.size() > 1);
  return AnyMerged;
}

bool LoadStoreOpt::isLegalOrBeforeLegalizer(const LegalityQuery &Query,

    return false;
  return IsPreLegalizer || Action == LegalizeAction::Legal;
}

  GStore *FirstStore = Stores[0];
  const unsigned NumStores = Stores.size();
  LLT WideValueTy =

  DebugLoc MergedLoc = Stores.front()->getDebugLoc();
  for (auto *Store : drop_begin(Stores))

  for (auto *Store : Stores) {
    auto MaybeCst =
    if (!MaybeCst) {
      ConstantVals.clear();
      break;
    }
  }

  auto *WideMMO =
  if (ConstantVals.empty()) {

    return false;
  }

  assert(ConstantVals.size() == NumStores);

  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {WideValueTy}}, *MF))
    return false;
  for (unsigned Idx = 0; Idx < ConstantVals.size(); ++Idx) {

  }
  auto NewStore =
  (void)NewStore;
                    << " stores into merged store: " << *NewStore);
  NumStoresMerged += Stores.size();

  MORE.emit([&]() {
    R << "Merged " << NV("NumMerged", Stores.size()) << " stores of "
      << " bytes into a single store of "
    return R;
  });

  for (auto *MI : Stores)
    InstsToErase.insert(MI);
  return true;
}

bool LoadStoreOpt::processMergeCandidate(StoreMergeCandidate &C) {
  if (C.Stores.size() < 2) {
    C.reset();
    return false;
  }

  LLVM_DEBUG(dbgs() << "Checking store merge candidate with " << C.Stores.size()
                    << " stores, starting with " << *C.Stores[0]);

  auto DoesStoreAliasWithPotential = [&](unsigned Idx, GStore &CheckStore) {
    for (auto AliasInfo : reverse(C.PotentialAliases)) {
      MachineInstr *PotentialAliasOp = AliasInfo.first;
      unsigned PreCheckedIdx = AliasInfo.second;
      if (static_cast<unsigned>(Idx) < PreCheckedIdx) {

        return false;
      }

                       AA)) {
        LLVM_DEBUG(dbgs() << "Potential alias " << *PotentialAliasOp
                          << " detected\n");
        return true;
      }
    }
    return false;
  };

  for (int StoreIdx = C.Stores.size() - 1; StoreIdx >= 0; --StoreIdx) {
    auto *CheckStore = C.Stores[StoreIdx];
    if (DoesStoreAliasWithPotential(StoreIdx, *CheckStore))
      continue;
  }

                    << " stores remaining after alias checks. Merging...\n");

  C.reset();
  if (StoresToMerge.size() < 2)
    return false;
  return mergeStores(StoresToMerge);
}

bool LoadStoreOpt::operationAliasesWithCandidate(MachineInstr &MI,
                                                 StoreMergeCandidate &C) {
  if (C.Stores.empty())
    return false;
    return instMayAlias(MI, *OtherMI, *MRI, AA);
  });
}

void LoadStoreOpt::StoreMergeCandidate::addPotentialAlias(MachineInstr &MI) {
  PotentialAliases.emplace_back(std::make_pair(&MI, Stores.size() - 1));
}

bool LoadStoreOpt::addStoreToCandidate(GStore &StoreMI,
                                       StoreMergeCandidate &C) {

    return false;

    return false;

    return false;

  Register StoreBase = BIO.getBase();
  if (C.Stores.empty()) {
    C.BasePtr = StoreBase;
    if (!BIO.hasValidOffset()) {
      C.CurrentLowestOffset = 0;
    } else {
      C.CurrentLowestOffset = BIO.getOffset();
    }

    if (BIO.hasValidOffset() &&
        BIO.getOffset() < static_cast<int64_t>(ValueTy.getSizeInBytes()))
      return false;
    C.Stores.emplace_back(&StoreMI);
    LLVM_DEBUG(dbgs() << "Starting a new merge candidate group with: "
                      << StoreMI);
    return true;
  }

    return false;

    return false;

  if (C.BasePtr != StoreBase)
    return false;

  if (!BIO.hasValidOffset())
    return false;
  if ((C.CurrentLowestOffset -
       static_cast<int64_t>(ValueTy.getSizeInBytes())) != BIO.getOffset())
    return false;

  C.Stores.emplace_back(&StoreMI);
  C.CurrentLowestOffset = C.CurrentLowestOffset - ValueTy.getSizeInBytes();
  LLVM_DEBUG(dbgs() << "Candidate added store: " << StoreMI);
  return true;
}

  bool Changed = false;

  StoreMergeCandidate Candidate;
    if (InstsToErase.contains(&MI))
      continue;

    if (auto *StoreMI = dyn_cast<GStore>(&MI)) {

      if (!addStoreToCandidate(*StoreMI, Candidate)) {

        if (operationAliasesWithCandidate(*StoreMI, Candidate)) {
          Changed |= processMergeCandidate(Candidate);
          continue;
        }
        Candidate.addPotentialAlias(*StoreMI);
      }
      continue;
    }

    if (Candidate.Stores.empty())
      continue;

      Changed |= processMergeCandidate(Candidate);
      Candidate.Stores.clear();
      continue;
    }

    if (!MI.mayLoadOrStore())
      continue;

    if (operationAliasesWithCandidate(MI, Candidate)) {

      Changed |= processMergeCandidate(Candidate);
      continue;
    }

    Candidate.addPotentialAlias(MI);
  }

  Changed |= processMergeCandidate(Candidate);

  for (auto *MI : InstsToErase)
    MI->eraseFromParent();
  InstsToErase.clear();
  return Changed;
}

static std::optional<int64_t>

    return std::nullopt;

  int64_t ShiftAmt;
    if (!SrcVal.isValid() || TruncVal == SrcVal) {
      SrcVal = TruncVal;
      return 0;
    }
    return std::nullopt;
  }

  unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
  if (ShiftAmt % NarrowBits != 0)
    return std::nullopt;
  const unsigned Offset = ShiftAmt / NarrowBits;

  if (SrcVal.isValid() && FoundSrcVal != SrcVal)
    return std::nullopt;

    SrcVal = FoundSrcVal;
  else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
    return std::nullopt;
}


bool LoadStoreOpt::mergeTruncStore(GStore &StoreMI,

    return false;
  case 8:
  case 16:
  case 32:
    break;
  default:
    return false;
  }
    return false;

  auto &LastStore = StoreMI;

  int64_t LastOffset;
  if (mi_match(LastStore.getPointerReg(), *MRI,
    BaseReg = LastStore.getPointerReg();
    LastOffset = 0;
  }

  GStore *LowestIdxStore = &LastStore;
  int64_t LowestIdxOffset = LastOffset;

  if (!LowestShiftAmt)
    return false;

  LLT WideStoreTy = MRI->getType(WideSrcVal);

    return false;
  const unsigned NumStoresRequired =

  OffsetMap[*LowestShiftAmt] = LastOffset;

  const int MaxInstsToCheck = 10;
  int NumInstsChecked = 0;
  for (auto II = ++LastStore.getReverseIterator();
       II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
       ++II) {
    NumInstsChecked++;
    if ((NewStore = dyn_cast<GStore>(&*II))) {
        break;
    } else if (II->isLoadFoldBarrier() || II->mayLoad()) {
      break;
    } else {
      continue;
    }

    int64_t MemOffset;

      MemOffset = 0;
    }
    if (BaseReg != NewBaseReg)
      break;

    if (!ShiftByteOffset)
      break;
    if (MemOffset < LowestIdxOffset) {
      LowestIdxOffset = MemOffset;
      LowestIdxStore = NewStore;
    }

    if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
        OffsetMap[*ShiftByteOffset] != INT64_MAX)
      break;
    OffsetMap[*ShiftByteOffset] = MemOffset;

    NumInstsChecked = 0;
    if (FoundStores.size() == NumStoresRequired)
      break;
  }

  if (FoundStores.size() != NumStoresRequired) {
    if (FoundStores.size() == 1)
      return false;

  }

  unsigned NumStoresFound = FoundStores.size();

  const auto &DL = LastStore.getMF()->getDataLayout();
  auto &C = LastStore.getMF()->getFunction().getContext();

  unsigned Fast = 0;
      C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast);
  if (!Allowed || !Fast)
    return false;

  auto checkOffsets = [&](bool MatchLittleEndian) {
    if (MatchLittleEndian) {
      for (unsigned i = 0; i != NumStoresFound; ++i)
        if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
          return false;
    } else {
      for (unsigned i = 0, j = NumStoresFound - 1; i != NumStoresFound;
           ++i, --j)
        if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
          return false;
    }
    return true;
  };

  bool NeedBswap = false;
  bool NeedRotate = false;
  if (!checkOffsets(DL.isLittleEndian())) {

    if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
      NeedBswap = true;
    else if (NumStoresFound == 2 && checkOffsets(DL.isBigEndian()))
      NeedRotate = true;
    else
      return false;
  }

  if (NeedBswap &&
      !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}}, *MF))
    return false;
  if (NeedRotate &&
      !isLegalOrBeforeLegalizer(
          {TargetOpcode::G_ROTR, {WideStoreTy, WideStoreTy}}, *MF))
    return false;

  if (WideStoreTy != MRI->getType(WideSrcVal))
    WideSrcVal = Builder.buildTrunc(WideStoreTy, WideSrcVal).getReg(0);

  if (NeedBswap) {
    WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0);
  } else if (NeedRotate) {
               "Unexpected type for rotate");
    auto RotAmt =
    WideSrcVal =
  }

  for (auto *ST : FoundStores) {
    ST->eraseFromParent();
    DeletedStores.insert(ST);
  }
  return true;
}

  bool Changed = false;

    if (auto *StoreMI = dyn_cast<GStore>(&MI))

  for (auto *StoreMI : Stores) {
    if (DeletedStores.count(StoreMI))
      continue;
    if (mergeTruncStore(*StoreMI, DeletedStores))
      Changed = true;
  }
  return Changed;
}

bool LoadStoreOpt::mergeFunctionStores(MachineFunction &MF) {
  bool Changed = false;
  for (auto &BB : MF) {
    Changed |= mergeBlockStores(BB);
    Changed |= mergeTruncStoresBlock(BB);
  }

  if (Changed) {
    for (auto &BB : MF) {
        I.eraseFromParent();
      }
    }
  }

  return Changed;
}

void LoadStoreOpt::initializeStoreMergeTargetInfo(unsigned AddrSpace) {

  if (LegalStoreSizes.count(AddrSpace)) {
    assert(LegalStoreSizes[AddrSpace].any());
    return;
  }

  const auto &LI = *MF->getSubtarget().getLegalizerInfo();
  const auto &DL = MF->getFunction().getDataLayout();

    LegalityQuery Q(TargetOpcode::G_STORE, StoreTys, MemDescrs);
      LegalSizes.set(Size);
  }
  assert(LegalSizes.any() && "Expected some store sizes to be legal!");
  LegalStoreSizes[AddrSpace] = LegalSizes;
}

    return false;

                    << '\n');

  init(MF);
  bool Changed = false;
  Changed |= mergeFunctionStores(MF);

  LegalStoreSizes.clear();
  return Changed;
}

unsigned const MachineRegisterInfo * MRI

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

Atomic ordering constants.

Analysis containing CSE Info

Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.

Performs the initial survey of the specified function

Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...

Interface for Targets to specify which operations they can successfully select and how the others sho...

Generic memory optimizations

const unsigned MaxStoreSizeToForm

static std::optional< int64_t > getTruncStoreByteOffset(GStore &Store, Register &SrcVal, MachineRegisterInfo &MRI)

Check if the store Store is a truncstore that can be merged.
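
The byte-index arithmetic behind this helper can be sketched in isolation; the snippet below is illustrative only (byteIndexFromShift is a made-up name, not part of the pass). A store of trunc(lshr(Src, Shift)) with an N-bit memory type covers byte index Shift / N, provided the shift is a whole multiple of N.

  #include <cstdint>
  #include <optional>

  // Hypothetical helper mirroring the offset computation described above.
  static std::optional<int64_t> byteIndexFromShift(uint64_t ShiftAmt,
                                                   unsigned NarrowBits) {
    if (NarrowBits == 0 || ShiftAmt % NarrowBits != 0)
      return std::nullopt;        // Shift must align to a whole narrow element.
    return ShiftAmt / NarrowBits; // e.g. lshr by 16 into an i8 store -> index 2.
  }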

static bool isInstHardMergeHazard(MachineInstr &MI)

Returns true if the instruction creates an unavoidable hazard that forces a boundary between store me...

Implement a low-level type suitable for MachineInstr level instruction selection.

Contains matchers for matching SSA Machine Instructions.

This file provides utility analysis objects describing memory locations.

uint64_t IntrinsicInst * II

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)

assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())

This file defines the SmallPtrSet class.

This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...

#define STATISTIC(VARNAME, DESC)

This file describes how to lower LLVM code to machine code.

A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.

bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)

A trivial helper function to check to see if the specified pointers are no-alias.
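
A minimal sketch of posing this query, assuming the caller already has an AAResults reference plus the IR values and byte sizes of the two accesses (all names below are placeholders):

  #include "llvm/Analysis/AliasAnalysis.h"
  #include "llvm/Analysis/MemoryLocation.h"

  static bool provablyNoAlias(llvm::AAResults &AA, const llvm::Value *PtrA,
                              uint64_t BytesA, const llvm::Value *PtrB,
                              uint64_t BytesB) {
    using llvm::LocationSize;
    using llvm::MemoryLocation;
    // Build precise memory locations and ask alias analysis directly.
    return AA.isNoAlias(MemoryLocation(PtrA, LocationSize::precise(BytesA)),
                        MemoryLocation(PtrB, LocationSize::precise(BytesB)));
  }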

Class for arbitrary precision integers.

Represent the analysis usage information of a pass.

AnalysisUsage & addRequired()

void setPreservesAll()

Set by analyses that do not transform their input at all.

static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)

When two instructions are combined into a single instruction we also need to combine the original loc...

LLVMContext & getContext() const

getContext - Return a reference to the LLVMContext associated with this function.

Helper struct to store a base, index and offset that forms an address.

int64_t getOffset() const

bool hasValidOffset() const

Register getPointerReg() const

Get the source register of the pointer value.

MachineMemOperand & getMMO() const

Get the MachineMemOperand on this instruction.

LocationSize getMemSizeInBits() const

Returns the size in bits of the memory access.

bool isSimple() const

Returns true if the memory operation is neither atomic nor volatile.

Register getValueReg() const

Get the stored value register.

constexpr unsigned getScalarSizeInBits() const

constexpr bool isScalar() const

static constexpr LLT scalar(unsigned SizeInBits)

Get a low-level scalar or aggregate "bag of bits".
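
A sketch of how such types are built up when widening (the helper name and header path are illustrative and may differ between LLVM versions): the merged value type is a scalar whose width is the narrow width times the number of stores, e.g. four s8 stores become one s32 store.

  #include "llvm/CodeGenTypes/LowLevelType.h" // "llvm/CodeGen/LowLevelType.h" on older trees

  static llvm::LLT wideTypeFor(llvm::LLT NarrowTy, unsigned NumStores) {
    // s8 x 4 -> s32, s16 x 2 -> s32, and so on.
    return llvm::LLT::scalar(NarrowTy.getSizeInBits().getFixedValue() * NumStores);
  }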

constexpr TypeSize getSizeInBits() const

Returns the total size of the type. Must only be called on sized types.

constexpr unsigned getAddressSpace() const

constexpr TypeSize getSizeInBytes() const

Returns the total size of the type in bytes, i.e.

LegalizeActionStep getAction(const LegalityQuery &Query) const

Determine what action should be taken to legalize the described instruction.

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...

bool runOnMachineFunction(MachineFunction &MF) override

runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...

static LocationSize precise(uint64_t Value)

static constexpr LocationSize beforeOrAfterPointer()

Any location before or after the base pointer (but still within the underlying object).

TypeSize getValue() const

reverse_iterator rbegin()

The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.

bool isFixedObjectIndex(int ObjectIdx) const

Returns true if the specified index corresponds to a fixed stack object.

MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.

bool hasProperty(Property P) const

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

StringRef getName() const

getName - Return the name of the corresponding LLVM function.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.
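
A sketch of how a widened store might obtain its memory operand, reusing the pointer info and alignment of the lowest-addressed original access (makeWideMMO and its parameters are illustrative, not part of this file):

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineMemOperand.h"

  static llvm::MachineMemOperand *
  makeWideMMO(llvm::MachineFunction &MF,
              const llvm::MachineMemOperand &LowestMMO, llvm::LLT WideTy) {
    // Keep the original pointer info and alignment; only the memory type grows.
    return MF.getMachineMemOperand(LowestMMO.getPointerInfo(),
                                   llvm::MachineMemOperand::MOStore, WideTy,
                                   LowestMMO.getAlign());
  }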

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

Function & getFunction()

Return the LLVM function that this machine code represents.

const MachineFunctionProperties & getProperties() const

Get the function properties.

MachineInstrBuilder buildRotateRight(const DstOp &Dst, const SrcOp &Src, const SrcOp &Amt)

Build and insert Dst = G_ROTR Src, Amt.

void setInstr(MachineInstr &MI)

Set the insertion point to before MI.

MachineInstrBuilder buildBSwap(const DstOp &Dst, const SrcOp &Src0)

Build and insert Dst = G_BSWAP Src0.

MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert G_STORE Val, Addr, MMO.
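
Roughly the emission sequence mergeTruncStore ends with, shown as a standalone sketch; B is assumed to be a MachineIRBuilder already positioned at the lowest-indexed store, and the function and parameter names are placeholders:

  #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

  static void emitMergedStore(llvm::MachineIRBuilder &B, llvm::LLT WideTy,
                              llvm::Register WideSrc, llvm::Register Ptr,
                              llvm::MachineMemOperand &WideMMO, bool NeedBswap) {
    // On an endianness mismatch the wide value is byte-swapped first, then a
    // single wide G_STORE replaces the narrow ones.
    if (NeedBswap)
      WideSrc = B.buildBSwap(WideTy, WideSrc).getReg(0);
    B.buildStore(WideSrc, Ptr, WideMMO);
  }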

void setInstrAndDebugLoc(MachineInstr &MI)

Set the insertion point to before MI, and set the debug loc to MI's loc.

MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_TRUNC Op.

void setDebugLoc(const DebugLoc &DL)

Set the debug location to DL for all the next build instructions.

void setMF(MachineFunction &MF)

virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)

Build and insert Res = G_CONSTANT Val.
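
When every narrow store writes a constant, the merge can fold them into one wide G_CONSTANT before storing. A sketch under the assumption of a little-endian layout with ConstantVals[0] as the lowest-addressed piece (buildWideConstant is a hypothetical helper):

  #include "llvm/ADT/APInt.h"
  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

  static llvm::Register buildWideConstant(llvm::MachineIRBuilder &B,
                                          llvm::LLT WideTy, unsigned NarrowBits,
                                          llvm::ArrayRef<llvm::APInt> ConstantVals) {
    // Pack each narrow constant into its slice of the wide value.
    llvm::APInt Wide(WideTy.getSizeInBits().getFixedValue(), 0);
    for (unsigned I = 0, E = ConstantVals.size(); I != E; ++I)
      Wide.insertBits(ConstantVals[I], I * NarrowBits);
    return B.buildConstant(WideTy, Wide).getReg(0);
  }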

Register getReg(unsigned Idx) const

Get the register for the operand index.

Representation of each machine instruction.

const MachineBasicBlock * getParent() const

const DebugLoc & getDebugLoc() const

Returns the debug location id of this MachineInstr.

A description of a memory reference used in the backend.

LLT getMemoryType() const

Return the memory type of the memory reference.

const MachinePointerInfo & getPointerInfo() const

Align getAlign() const

Return the minimum known alignment in bytes of the actual memory reference.

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

LLT getType(Register Reg) const

Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.

Representation for a specific memory location.

static PointerType * get(Type *ElementType, unsigned AddressSpace)

This constructs a pointer to an object of the specified type in a numbered address space.

Wrapper class representing virtual and physical registers.

constexpr bool isValid() const

A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...

size_type count(ConstPtrType Ptr) const

count - Return 1 if the specified pointer is in the set, 0 otherwise.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

reference emplace_back(ArgTypes &&... Args)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

bool isTypeLegal(EVT VT) const

Return true if the target has native support for the specified value type.

virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const

Return true if the target supports a memory access of this type for the given address space and align...

virtual bool canMergeStoresTo(unsigned AS, EVT MemVT, const MachineFunction &MF) const

Returns if it's reasonable to merge stores to MemVT size.
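
A sketch of the target-side questions a store-merging pass asks before forming a wide store, combining the two hooks above; targetLikesWideStore and its parameters are illustrative, and the EVT is derived from the candidate's LLT:

  #include "llvm/CodeGen/GlobalISel/Utils.h"
  #include "llvm/CodeGen/MachineMemOperand.h"
  #include "llvm/CodeGen/TargetLowering.h"

  static bool targetLikesWideStore(const llvm::TargetLowering &TLI,
                                   const llvm::MachineFunction &MF,
                                   llvm::LLT WideTy, unsigned AddrSpace,
                                   llvm::Align Alignment) {
    llvm::LLVMContext &Ctx = MF.getFunction().getContext();
    llvm::EVT WideEVT = llvm::getApproximateEVTForLLT(WideTy, Ctx);
    if (!TLI.canMergeStoresTo(AddrSpace, WideEVT, MF))
      return false;
    unsigned Fast = 0;
    // The access should be both allowed and fast for the merge to pay off.
    return TLI.allowsMemoryAccess(Ctx, MF.getDataLayout(), WideEVT, AddrSpace,
                                  Alignment, llvm::MachineMemOperand::MOStore,
                                  &Fast) &&
           Fast;
  }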

virtual const LegalizerInfo * getLegalizerInfo() const

virtual const TargetLowering * getTargetLowering() const

The instances of the Type class are immutable: once they are created, they are never changed.

constexpr ScalarTy getFixedValue() const

constexpr ScalarTy getKnownMinValue() const

Returns the minimum value this quantity can represent.

constexpr bool any(E Val)

@ Fast

Attempts to make calls as fast as possible (e.g.

@ C

The default llvm calling convention, compatible with C.

bool aliasIsKnownForLoadStore(const MachineInstr &MI1, const MachineInstr &MI2, bool &IsAlias, MachineRegisterInfo &MRI)

Compute whether or not a memory access at MI1 aliases with an access at MI2.

BaseIndexOffset getPointerInfo(Register Ptr, MachineRegisterInfo &MRI)

Returns a BaseIndexOffset which describes the pointer in Ptr.
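
A sketch of how this decomposition supports the adjacency test in addStoreToCandidate; storesAreAdjacent is a hypothetical helper, with A assumed to be the lower-addressed store:

  #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
  #include "llvm/CodeGen/GlobalISel/LoadStoreOpt.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"

  static bool storesAreAdjacent(llvm::GStore &A, llvm::GStore &B,
                                llvm::MachineRegisterInfo &MRI) {
    using namespace llvm::GISelAddressing;
    BaseIndexOffset InfoA = getPointerInfo(A.getPointerReg(), MRI);
    BaseIndexOffset InfoB = getPointerInfo(B.getPointerReg(), MRI);
    // Same base, known constant offsets, and B starts right where A ends.
    if (InfoA.getBase() != InfoB.getBase() || !InfoA.hasValidOffset() ||
        !InfoB.hasValidOffset())
      return false;
    int64_t SizeA = MRI.getType(A.getValueReg()).getSizeInBytes().getFixedValue();
    return InfoB.getOffset() - InfoA.getOffset() == SizeA;
  }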

bool instMayAlias(const MachineInstr &MI, const MachineInstr &Other, MachineRegisterInfo &MRI, AliasAnalysis *AA)

Returns true if the instruction MI may alias Other.

@ Legal

The operation is expected to be selectable directly by the target, and no transformation is necessary...

@ Unsupported

This operation is completely unsupported on the target.

operand_type_match m_Reg()

ConstantMatch< APInt > m_ICst(APInt &Cst)

BinaryOp_match< LHS, RHS, TargetOpcode::G_ASHR, false > m_GAShr(const LHS &L, const RHS &R)

bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)

BinaryOp_match< LHS, RHS, TargetOpcode::G_PTR_ADD, false > m_GPtrAdd(const LHS &L, const RHS &R)

Or< Preds... > m_any_of(Preds &&... preds)

BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)

UnaryOp_match< SrcTy, TargetOpcode::G_TRUNC > m_GTrunc(const SrcTy &Src)
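
The matcher idiom this file leans on most, shown in isolation: peel a constant offset off a G_PTR_ADD address (matchBasePlusConstant is a hypothetical wrapper):

  #include "llvm/ADT/APInt.h"
  #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"

  static bool matchBasePlusConstant(llvm::Register Ptr,
                                    const llvm::MachineRegisterInfo &MRI,
                                    llvm::Register &Base, int64_t &Offset) {
    using namespace llvm::MIPatternMatch;
    llvm::APInt Cst;
    // Ptr = G_PTR_ADD Base, G_CONSTANT Cst  ==>  bind Base and the offset.
    if (mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(Base), m_ICst(Cst)))) {
      Offset = Cst.getSExtValue();
      return true;
    }
    return false;
  }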

DiagnosticInfoOptimizationBase::Argument NV

This is an optimization pass for GlobalISel generic memory operations.

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)

Make a range that does early increment to allow mutation of the underlying range without disrupting i...
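
A sketch of the cleanup shape this enables: walking a block while erasing instructions from it, as the dead-instruction sweep in mergeFunctionStores does (eraseDeadInBlock and the IsDead predicate are placeholders):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/MachineInstr.h"

  static void
  eraseDeadInBlock(llvm::MachineBasicBlock &MBB,
                   llvm::function_ref<bool(llvm::MachineInstr &)> IsDead) {
    // The early-increment range keeps iteration valid while MI is erased.
    for (llvm::MachineInstr &MI : llvm::make_early_inc_range(MBB))
      if (IsDead(MI))
        MI.eraseFromParent();
  }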

MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)

Find the def instruction for Reg, folding away any trivial copies.

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

auto reverse(ContainerTy &&C)

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

EVT getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx)

void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)

Modify analysis usage so it preserves passes required for the SelectionDAG fallback.

std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)

If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
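
A sketch of the constant-store check in doSingleStoreMerge built on this lookup (getStoredConstant is a hypothetical wrapper):

  #include <optional>

  #include "llvm/ADT/APInt.h"
  #include "llvm/CodeGen/GlobalISel/Utils.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"

  static std::optional<llvm::APInt>
  getStoredConstant(llvm::Register ValReg, const llvm::MachineRegisterInfo &MRI) {
    // Look through copies and extends to a G_CONSTANT and return its value.
    if (auto ValAndVReg = llvm::getIConstantVRegValWithLookThrough(ValReg, MRI))
      return ValAndVReg->Value;
    return std::nullopt;
  }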

T bit_floor(T Value)

Returns the largest integral power of two no greater than Value if Value is nonzero.
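
For intuition on its role in mergeStores (hypothetical helper name): only a power-of-two number of stores is merged at a time, so a candidate of six stores is first clamped to four.

  #include <cstddef>

  #include "llvm/ADT/bit.h"

  static unsigned storesToTryMerging(size_t CandidateSize) {
    return llvm::bit_floor(CandidateSize); // 5 -> 4, 6 -> 4, 8 -> 8
  }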

LLT getLLTForType(Type &Ty, const DataLayout &DL)

Construct a low-level type based on an LLVM type.

bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)

Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...

Implement std::hash so that hash_code can be used in STL containers.

The LegalityQuery object bundles together all the information that's needed to decide whether a given...

LegalizeAction Action

The action to take or the final answer.