LLVM: lib/CodeGen/GlobalISel/LoadStoreOpt.cpp Source File

//===- LoadStoreOpt.cpp ----------- Generic memory optimizations -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the LoadStoreOpt optimization pass.
//===----------------------------------------------------------------------===//
#include <algorithm>

#define DEBUG_TYPE "loadstore-opt"

using namespace llvm;
using namespace ore;
using namespace MIPatternMatch;

STATISTIC(NumStoresMerged, "Number of stores merged");

const unsigned MaxStoreSizeToForm = 128;

char LoadStoreOpt::ID = 0;
INITIALIZE_PASS_BEGIN(LoadStoreOpt, DEBUG_TYPE, "Generic memory optimizations",
                      false, false)
INITIALIZE_PASS_END(LoadStoreOpt, DEBUG_TYPE, "Generic memory optimizations",
                    false, false)

LoadStoreOpt::LoadStoreOpt(std::function<bool(const MachineFunction &)> F)
    : MachineFunctionPass(ID), DoNotRunPass(F) {}

LoadStoreOpt::LoadStoreOpt()
    : LoadStoreOpt([](const MachineFunction &) { return false; }) {}

void LoadStoreOpt::init(MachineFunction &MF) {
  this->MF = &MF;
  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  TLI = MF.getSubtarget().getTargetLowering();
  LI = MF.getSubtarget().getLegalizerInfo();
  Builder.setMF(MF);
  IsPreLegalizer = !MF.getProperties().hasLegalized();
  InstsToErase.clear();
}

void LoadStoreOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AAResultsWrapperPass>();
  AU.setPreservesAll();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

BaseIndexOffset GISelAddressing::getPointerInfo(Register Ptr,
                                                MachineRegisterInfo &MRI) {
  BaseIndexOffset Info;
  Register PtrAddRHS;
  Register BaseReg;
  if (!mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(BaseReg), m_Reg(PtrAddRHS)))) {
    Info.setBase(Ptr);
    Info.setOffset(0);
    return Info;
  }
  Info.setBase(BaseReg);
  auto RHSCst = getIConstantVRegValWithLookThrough(PtrAddRHS, MRI);
  if (RHSCst)
    Info.setOffset(RHSCst->Value.getSExtValue());

  // Just recognize the simple base + constant offset form for now. More
  // complex indexed addressing patterns are left to future work.
  Info.setIndex(PtrAddRHS);
  return Info;
}
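// Illustrative example (not part of the original source): given
//   %ptr:_(p0) = G_PTR_ADD %base, %c   where %c:_(s64) = G_CONSTANT i64 16
// getPointerInfo(%ptr) returns Base = %base, Index = %c and Offset = 16. Any
// pointer that isn't a G_PTR_ADD is returned as Base = %ptr with Offset = 0.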

bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
                                               const MachineInstr &MI2,
                                               bool &IsAlias,
                                               MachineRegisterInfo &MRI) {
  auto *LdSt1 = dyn_cast<GLoadStore>(&MI1);
  auto *LdSt2 = dyn_cast<GLoadStore>(&MI2);
  if (!LdSt1 || !LdSt2)
    return false;

  BaseIndexOffset BasePtr0 = getPointerInfo(LdSt1->getPointerReg(), MRI);
  BaseIndexOffset BasePtr1 = getPointerInfo(LdSt2->getPointerReg(), MRI);

  if (!BasePtr0.getBase().isValid() || !BasePtr1.getBase().isValid())
    return false;

  LocationSize Size1 = LdSt1->getMemSize();
  LocationSize Size2 = LdSt2->getMemSize();

  int64_t PtrDiff;
  if (BasePtr0.getBase() == BasePtr1.getBase() && BasePtr0.hasValidOffset() &&
      BasePtr1.hasValidOffset()) {
    PtrDiff = BasePtr1.getOffset() - BasePtr0.getOffset();
    // If the size of a memory access is unknown or scalable (e.g. a
    // load/store of a scalable vector on the stack), don't use it to draw
    // conclusions.
    if (PtrDiff >= 0 && Size1.hasValue() && !Size1.isScalable()) {
      // BasePtr1 is PtrDiff bytes above BasePtr0; they only overlap if the
      // first access extends past that gap.
      IsAlias = !((int64_t)Size1.getValue() <= PtrDiff);
      return true;
    }
    if (PtrDiff < 0 && Size2.hasValue() && !Size2.isScalable()) {
      // BasePtr1 is below BasePtr0; they only overlap if the second access
      // extends back up into the first.
      IsAlias = !((PtrDiff + (int64_t)Size2.getValue()) <= 0);
      return true;
    }
    return false;
  }

  // We couldn't compute a constant offset between the two bases. See if the
  // defs of the base pointers let us prove no-alias anyway.
  auto *Base0Def = getDefIgnoringCopies(BasePtr0.getBase(), MRI);
  auto *Base1Def = getDefIgnoringCopies(BasePtr1.getBase(), MRI);
  if (!Base0Def || !Base1Def)
    return false; // Couldn't tell anything.

  if (Base0Def->getOpcode() != Base1Def->getOpcode())
    return false;

  if (Base0Def->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    MachineFrameInfo &MFI = Base0Def->getMF()->getFrameInfo();
    // Different frame indices that don't both refer to fixed stack objects
    // come from distinct allocas and therefore cannot overlap.
    if (Base0Def != Base1Def &&
        (!MFI.isFixedObjectIndex(Base0Def->getOperand(1).getIndex()) ||
         !MFI.isFixedObjectIndex(Base1Def->getOperand(1).getIndex()))) {
      IsAlias = false;
      return true;
    }
  }

  // Accesses based on two different global values cannot alias.
  if (Base0Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
    auto GV0 = Base0Def->getOperand(1).getGlobal();
    auto GV1 = Base1Def->getOperand(1).getGlobal();
    if (GV0 != GV1) {
      IsAlias = false;
      return true;
    }
  }

  // Can't tell anything about aliasing.
  return false;
}
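// Illustrative example (not part of the original source) for the same-base
// case above: two 4-byte accesses at offsets 0 and 8 give PtrDiff = 8 >=
// Size1 = 4, so IsAlias is set to false; at offsets 0 and 2, PtrDiff = 2 < 4
// and IsAlias is set to true.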

bool GISelAddressing::instMayAlias(const MachineInstr &MI,
                                   const MachineInstr &Other,
                                   MachineRegisterInfo &MRI,
                                   AliasAnalysis *AA) {
  struct MemUseCharacteristics {
    bool IsVolatile;
    bool IsAtomic;
    Register BasePtr;
    int64_t Offset;
    LocationSize NumBytes;
    MachineMemOperand *MMO;
  };

  auto getCharacteristics =
      [&](const MachineInstr *MI) -> MemUseCharacteristics {
    if (const auto *LS = dyn_cast<GLoadStore>(MI)) {
      Register BaseReg;
      int64_t Offset = 0;
      // No pre/post-inc addressing modes are considered here, unlike in SDAG.
      if (!mi_match(LS->getPointerReg(), MRI,
                    m_GPtrAdd(m_Reg(BaseReg), m_ICst(Offset)))) {
        BaseReg = LS->getPointerReg();
        Offset = 0;
      }

      LocationSize Size = LS->getMMO().getSize();
      return {LS->isVolatile(), LS->isAtomic(), BaseReg,
              Offset /*base offset*/, Size, &LS->getMMO()};
    }
    // Default: an instruction without a memory operand we know nothing about.
    return {false /*isvolatile*/,
            /*isAtomic*/ false,
            Register(),
            (int64_t)0 /*offset*/,
            LocationSize::beforeOrAfterPointer() /*size*/,
            (MachineMemOperand *)nullptr};
  };
  MemUseCharacteristics MUC0 = getCharacteristics(&MI),
                        MUC1 = getCharacteristics(&Other);

  // If they are to the same address, then they must be aliases.
  if (MUC0.BasePtr.isValid() && MUC0.BasePtr == MUC1.BasePtr &&
      MUC0.Offset == MUC1.Offset)
    return true;

  // If they are both volatile then they cannot be reordered.
  if (MUC0.IsVolatile && MUC1.IsVolatile)
    return true;

  // Be conservative about atomics for the moment.
  if (MUC0.IsAtomic && MUC1.IsAtomic)
    return true;

  // If one operation reads from invariant memory, and the other may store,
  // they cannot interfere.
  if (MUC0.MMO && MUC1.MMO) {
    if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
        (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
      return false;
  }

  // If a size is scalable and the offset is not zero, conservatively report
  // a possible alias.
  if ((MUC0.NumBytes.isScalable() && MUC0.Offset != 0) ||
      (MUC1.NumBytes.isScalable() && MUC1.Offset != 0))
    return true;

  const bool BothNotScalable =
      !MUC0.NumBytes.isScalable() && !MUC1.NumBytes.isScalable();

  // Try to prove that there is aliasing, or that there is no aliasing. Either
  // way, we can return now. If nothing can be proved, proceed with more tests.
  bool IsAlias;
  if (BothNotScalable &&
      aliasIsKnownForLoadStore(MI, Other, IsAlias, MRI))
    return IsAlias;

  // The following all rely on MMO0 and MMO1 being valid.
  if (!MUC0.MMO || !MUC1.MMO)
    return true;

  // Use alias analysis information if available.
  int64_t SrcValOffset0 = MUC0.MMO->getOffset();
  int64_t SrcValOffset1 = MUC1.MMO->getOffset();
  LocationSize Size0 = MUC0.NumBytes;
  LocationSize Size1 = MUC1.NumBytes;
  if (AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() && Size0.hasValue() &&
      Size1.hasValue()) {
    // Use alias analysis information.
    int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
    int64_t Overlap0 =
        Size0.getValue().getKnownMinValue() + SrcValOffset0 - MinOffset;
    int64_t Overlap1 =
        Size1.getValue().getKnownMinValue() + SrcValOffset1 - MinOffset;
    LocationSize Loc0 =
        Size0.isScalable() ? Size0 : LocationSize::precise(Overlap0);
    LocationSize Loc1 =
        Size1.isScalable() ? Size1 : LocationSize::precise(Overlap1);

    if (AA->isNoAlias(
            MemoryLocation(MUC0.MMO->getValue(), Loc0, MUC0.MMO->getAAInfo()),
            MemoryLocation(MUC1.MMO->getValue(), Loc1, MUC1.MMO->getAAInfo())))
      return false;
  }

  // Otherwise we have to assume they alias.
  return true;
}
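// Note (not part of the original source): instMayAlias() is conservative. It
// only returns false when one of the proofs above succeeds (invariant memory
// vs. store, a known non-overlap from aliasIsKnownForLoadStore, or an AA
// no-alias result); otherwise the two operations are assumed to alias.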

/// Returns true if the instruction creates an unavoidable hazard that
/// forces a boundary between store merge candidates.
static bool isInstHardMergeHazard(MachineInstr &MI) {
  return MI.hasUnmodeledSideEffects() || MI.hasOrderedMemoryRef();
}

bool LoadStoreOpt::mergeStores(SmallVectorImpl<GStore *> &StoresToMerge) {
  // Convert to power-of-2 chunks and try to merge those chunks.
  assert(StoresToMerge.size() > 1 && "Expected multiple stores to merge");
  LLT OrigTy = MRI->getType(StoresToMerge[0]->getValueReg());
  LLT PtrTy = MRI->getType(StoresToMerge[0]->getPointerReg());
  unsigned AS = PtrTy.getAddressSpace();
  // Ensure the legal store size info is computed for this address space.
  initializeStoreMergeTargetInfo(AS);
  const auto &LegalSizes = LegalStoreSizes[AS];

#ifndef NDEBUG
  for (auto *StoreMI : StoresToMerge)
    assert(MRI->getType(StoreMI->getValueReg()) == OrigTy);
#endif

  bool AnyMerged = false;
  do {
    unsigned NumPow2 = llvm::bit_floor(StoresToMerge.size());
    unsigned MaxSizeBits = NumPow2 * OrigTy.getSizeInBits().getFixedValue();
    // Compute the biggest store we can generate to handle the number of stores.
    unsigned MergeSizeBits;
    for (MergeSizeBits = MaxSizeBits; MergeSizeBits > 1; MergeSizeBits /= 2) {
      LLT StoreTy = LLT::scalar(MergeSizeBits);
      EVT StoreEVT =
          getApproximateEVTForLLT(StoreTy, MF->getFunction().getContext());
      if (LegalSizes.size() > MergeSizeBits && LegalSizes[MergeSizeBits] &&
          TLI->canMergeStoresTo(AS, StoreEVT, *MF) &&
          (TLI->isTypeLegal(StoreEVT)))
        break; // We can generate a MergeSizeBits store.
    }
    if (MergeSizeBits <= OrigTy.getSizeInBits())
      return AnyMerged; // No wider merge possible.

    unsigned NumStoresToMerge = MergeSizeBits / OrigTy.getSizeInBits();
    // Perform the actual merging.
    SmallVector<GStore *, 8> SingleMergeStores(
        StoresToMerge.begin(), StoresToMerge.begin() + NumStoresToMerge);
    AnyMerged |= doSingleStoreMerge(SingleMergeStores);
    StoresToMerge.erase(StoresToMerge.begin(),
                        StoresToMerge.begin() + NumStoresToMerge);
  } while (StoresToMerge.size() > 1);
  return AnyMerged;
}
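// Worked example (not part of the original source): for 7 adjacent s16 stores,
// bit_floor(7) == 4, so the loop first tries a 64-bit merge of 4 stores, then
// re-runs on the remaining 3 and tries a 32-bit merge of 2, leaving the final
// store untouched if no larger legal width is found.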

bool LoadStoreOpt::isLegalOrBeforeLegalizer(const LegalityQuery &Query,
                                            MachineFunction &MF) const {
  auto Action = LI->getAction(Query).Action;
  // If the instruction is unsupported, it can't be legalized at all.
  if (Action == LegalizeAction::Unsupported)
    return false;
  return IsPreLegalizer || Action == LegalizeAction::Legal;
}
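// Illustrative example (not part of the original source) of the transform
// performed by doSingleStoreMerge() below: on a little-endian target, four s8
// stores of the constants 1, 2, 3, 4 to %p, %p+1, %p+2 and %p+3 become a
// single s32 store of the constant 0x04030201 to %p.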

bool LoadStoreOpt::doSingleStoreMerge(SmallVectorImpl<GStore *> &Stores) {
  assert(Stores.size() > 1);
  // We know that all the stores are consecutive and there are no aliasing
  // operations in the range. However, the values that are being stored may be
  // generated anywhere before each store. To ensure we have the values
  // available, we materialize the wide value and new store at the place of the
  // final store in the merge sequence.
  GStore *FirstStore = Stores[0];
  const unsigned NumStores = Stores.size();
  LLT SmallTy = MRI->getType(FirstStore->getValueReg());
  LLT WideValueTy =
      LLT::scalar(NumStores * SmallTy.getSizeInBits().getFixedValue());

  // For each store, compute pairwise merged debug locs.
  DebugLoc MergedLoc = Stores.front()->getDebugLoc();
  for (auto *Store : drop_begin(Stores))
    MergedLoc = DebugLoc::getMergedLocation(MergedLoc, Store->getDebugLoc());

  Builder.setInstr(*Stores.back());
  Builder.setDebugLoc(MergedLoc);

  // If all of the store values are constants, then create a wide constant
  // directly. Otherwise we would need to generate instructions to merge the
  // existing values together into a wider type.
  SmallVector<APInt, 8> ConstantVals;
  for (auto *Store : Stores) {
    auto MaybeCst =
        getIConstantVRegValWithLookThrough(Store->getValueReg(), *MRI);
    if (!MaybeCst) {
      ConstantVals.clear();
      break;
    }
    ConstantVals.emplace_back(MaybeCst->Value);
  }

  Register WideReg;
  auto *WideMMO =
      MF->getMachineMemOperand(&FirstStore->getMMO(), 0, WideValueTy);
  if (ConstantVals.empty()) {
    // Mimic the SDAG behaviour here and don't try to do anything for unknown
    // values. In future, we could also support merging a smaller number of
    // stores.
    return false;
  }

  assert(ConstantVals.size() == NumStores);
  // Check if our wide constant is legal.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {WideValueTy}}, *MF))
    return false;
  APInt WideConst(WideValueTy.getSizeInBits(), 0);
  for (unsigned Idx = 0; Idx < ConstantVals.size(); ++Idx) {
    // Insert the smaller constant into the corresponding position in the
    // wider one.
    WideConst.insertBits(ConstantVals[Idx], Idx * SmallTy.getSizeInBits());
  }
  WideReg = Builder.buildConstant(WideValueTy, WideConst).getReg(0);
  auto NewStore =
      Builder.buildStore(WideReg, FirstStore->getPointerReg(), *WideMMO);
  (void) NewStore;
  LLVM_DEBUG(dbgs() << "Merged " << Stores.size()
                    << " stores into merged store: " << *NewStore);
  LLVM_DEBUG(for (auto *MI : Stores) dbgs() << "  " << *MI;);
  NumStoresMerged += Stores.size();

  MachineOptimizationRemarkEmitter MORE(*MF, nullptr);
  MORE.emit([&]() {
    MachineOptimizationRemark R(DEBUG_TYPE, "MergedStore",
                                FirstStore->getDebugLoc(),
                                FirstStore->getParent());
    R << "Merged " << NV("NumMerged", Stores.size()) << " stores of "
      << NV("OrigWidth", SmallTy.getSizeInBytes())
      << " bytes into a single store of "
      << NV("NewWidth", WideValueTy.getSizeInBytes()) << " bytes";
    return R;
  });

  InstsToErase.insert_range(Stores);
  return true;
}

bool LoadStoreOpt::processMergeCandidate(StoreMergeCandidate &C) {
  if (C.Stores.size() < 2) {
    C.reset();
    return false;
  }

  LLVM_DEBUG(dbgs() << "Checking store merge candidate with " << C.Stores.size()
                    << " stores, starting with " << *C.Stores[0]);
  // The stores in the candidate are known to be adjacent. While searching we
  // also recorded instructions that might alias, together with the index of
  // the store that had most recently been added when they were seen. A
  // recorded alias can only affect stores that were added to the candidate
  // after it was seen, since only those stores are moved past it when the
  // merged store is emitted at the position of the final store.
  SmallVector<GStore *> StoresToMerge;

  auto DoesStoreAliasWithPotential = [&](unsigned Idx, GStore &CheckStore) {
    for (auto AliasInfo : reverse(C.PotentialAliases)) {
      MachineInstr *PotentialAliasOp = AliasInfo.first;
      unsigned PreCheckedIdx = AliasInfo.second;
      if (Idx < PreCheckedIdx) {
        // Once our store index is lower than the index associated with the
        // potential alias, this alias (and all earlier ones) can't affect the
        // store, so we're done checking.
        return false;
      }
      // Need to check this alias.
      if (GISelAddressing::instMayAlias(CheckStore, *PotentialAliasOp, *MRI,
                                        AA)) {
        LLVM_DEBUG(dbgs() << "Potential alias " << *PotentialAliasOp
                          << " detected\n");
        return true;
      }
    }
    return false;
  };
  // Start from the last store in the group, and check if it aliases with any
  // of the potential aliasing operations in the list.
  for (int StoreIdx = C.Stores.size() - 1; StoreIdx >= 0; --StoreIdx) {
    auto *CheckStore = C.Stores[StoreIdx];
    if (DoesStoreAliasWithPotential(StoreIdx, *CheckStore))
      continue;
    StoresToMerge.emplace_back(CheckStore);
  }

  LLVM_DEBUG(dbgs() << StoresToMerge.size()
                    << " stores remaining after alias checks. Merging...\n");

  // Now we've checked for aliasing hazards, merge any stores left.
  C.reset();
  if (StoresToMerge.size() < 2)
    return false;
  return mergeStores(StoresToMerge);
}

bool LoadStoreOpt::operationAliasesWithCandidate(MachineInstr &MI,
                                                 StoreMergeCandidate &C) {
  if (C.Stores.empty())
    return false;
  return llvm::any_of(C.Stores, [&](MachineInstr *OtherMI) {
    return instMayAlias(MI, *OtherMI, *MRI, AA);
  });
}

void LoadStoreOpt::StoreMergeCandidate::addPotentialAlias(MachineInstr &MI) {
  PotentialAliases.emplace_back(std::make_pair(&MI, Stores.size() - 1));
}

bool LoadStoreOpt::addStoreToCandidate(GStore &StoreMI,
                                       StoreMergeCandidate &C) {
  // Check if the given store writes to an address space and size we're
  // interested in merging.
  LLT ValueTy = MRI->getType(StoreMI.getValueReg());
  LLT PtrTy = MRI->getType(StoreMI.getPointerReg());

  // Only handle scalars.
  if (!ValueTy.isScalar())
    return false;

  // Don't allow truncating stores for now.
  if (StoreMI.getMemSizeInBits() != ValueTy.getSizeInBits())
    return false;

  // Avoid adding volatile or ordered stores to the candidate. We already have
  // a check for this in instMayAlias() but that only gets called later between
  // potential aliasing hazards.
  if (!StoreMI.isSimple())
    return false;

  Register StoreAddr = StoreMI.getPointerReg();
  auto BIO = getPointerInfo(StoreAddr, *MRI);
  Register StoreBase = BIO.getBase();
  if (C.Stores.empty()) {
    C.BasePtr = StoreBase;
    if (!BIO.hasValidOffset()) {
      C.CurrentLowestOffset = 0;
    } else {
      C.CurrentLowestOffset = BIO.getOffset();
    }
    // If the first store's offset is smaller than the store size, any lower
    // adjacent store would need a negative offset; bail out rather than start
    // a candidate here.
    if (BIO.hasValidOffset() &&
        BIO.getOffset() < static_cast<int64_t>(ValueTy.getSizeInBytes()))
      return false;
    C.Stores.emplace_back(&StoreMI);
    LLVM_DEBUG(dbgs() << "Starting a new merge candidate group with: "
                      << StoreMI);
    return true;
  }

  // Check the store is the same size as the existing ones in the candidate.
  if (MRI->getType(C.Stores[0]->getValueReg()).getSizeInBits() !=
      ValueTy.getSizeInBits())
    return false;

  if (MRI->getType(C.Stores[0]->getPointerReg()).getAddressSpace() !=
      PtrTy.getAddressSpace())
    return false;

  // There are other stores in the candidate. Check that the store address
  // writes to the next lowest adjacent address.
  if (C.BasePtr != StoreBase)
    return false;

  // Without a constant offset we can't establish adjacency.
  if (!BIO.hasValidOffset())
    return false;
  if ((C.CurrentLowestOffset -
       static_cast<int64_t>(ValueTy.getSizeInBytes())) != BIO.getOffset())
    return false;

  // This writes to an adjacent address. Allow it.
  C.Stores.emplace_back(&StoreMI);
  C.CurrentLowestOffset = C.CurrentLowestOffset - ValueTy.getSizeInBytes();
  LLVM_DEBUG(dbgs() << "Candidate added store: " << StoreMI);
  return true;
}
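// Note (not part of the original source): candidates grow downwards in memory.
// If the candidate currently starts at [Base + 8] with 4-byte stores, the next
// store added must write exactly to [Base + 4].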

bool LoadStoreOpt::mergeBlockStores(MachineBasicBlock &MBB) {
  bool Changed = false;
  // Walk through the block bottom-up, looking for merging candidates.
  StoreMergeCandidate Candidate;
  for (MachineInstr &MI : llvm::reverse(MBB)) {
    if (InstsToErase.contains(&MI))
      continue;

    if (auto *StoreMI = dyn_cast<GStore>(&MI)) {
      // We have a G_STORE. Add it to the candidate if it writes to an adjacent
      // address.
      if (!addStoreToCandidate(*StoreMI, Candidate)) {
        // Store wasn't eligible to be added. If it aliases the candidate,
        // process the candidate now; otherwise record it as a potential alias.
        if (operationAliasesWithCandidate(*StoreMI, Candidate)) {
          Changed |= processMergeCandidate(Candidate);
          continue;
        }
        Candidate.addPotentialAlias(*StoreMI);
      }
      continue;
    }

    // If we don't have any stores yet, this instruction can't pose a problem.
    if (Candidate.Stores.empty())
      continue;

    // We're dealing with some other kind of instruction.
    if (isInstHardMergeHazard(MI)) {
      Changed |= processMergeCandidate(Candidate);
      Candidate.Stores.clear();
      continue;
    }

    if (!MI.mayLoadOrStore())
      continue;

    if (operationAliasesWithCandidate(MI, Candidate)) {
      // We have a potential alias, so process the current candidate if we can
      // and then continue looking for a new candidate.
      Changed |= processMergeCandidate(Candidate);
      continue;
    }

    // Record this instruction as a potential alias for future stores that are
    // added to the candidate.
    Candidate.addPotentialAlias(MI);
  }

  // Process any candidate left after finishing searching the entire block.
  Changed |= processMergeCandidate(Candidate);

  // Erase instructions now that we're no longer iterating over the block.
  for (auto *MI : InstsToErase)
    MI->eraseFromParent();
  InstsToErase.clear();
  return Changed;
}
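// Note (not part of the original source): the block is scanned bottom-up, so
// stores are added to a candidate from the highest address downwards; any
// aliasing instruction encountered along the way either flushes the candidate
// or is recorded as a potential alias for later checks.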

/// Check if the store \p Store is a truncstore that can be merged. That is,
/// it's a store of a shifted value of \p SrcVal. If \p SrcVal is an empty
/// Register then it does not need to match and SrcVal is set to the source
/// value found.
/// On match, returns the start byte offset of the \p SrcVal that is being
/// stored.
static std::optional<int64_t>
getTruncStoreByteOffset(GStore &Store, Register &SrcVal,
                        MachineRegisterInfo &MRI) {
  Register TruncVal;
  if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal))))
    return std::nullopt;

  // The shift amount must be a constant multiple of the narrow type.
  // It is translated to the byte offset into the wide source value "y".
  //
  //   x = G_LSHR y, ShiftAmtC
  //   s8 z = G_TRUNC x
  //   store z, ...
  Register FoundSrcVal;
  int64_t ShiftAmt;
  if (!mi_match(TruncVal, MRI,
                m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)),
                         m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) {
    if (!SrcVal.isValid() || TruncVal == SrcVal) {
      // If it's not a shifted value, it's the whole value at byte offset 0.
      SrcVal = TruncVal;
      return 0;
    }
    return std::nullopt;
  }

  unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
  if (ShiftAmt % NarrowBits != 0)
    return std::nullopt;
  const unsigned Offset = ShiftAmt / NarrowBits;

  if (SrcVal.isValid() && FoundSrcVal != SrcVal)
    return std::nullopt;

  if (!SrcVal.isValid())
    SrcVal = FoundSrcVal;
  else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
    return std::nullopt;
  return Offset;
}
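// Illustrative example (not part of the original source): for
//   %sh = G_LSHR %wide, 16
//   %t  = G_TRUNC %sh
//   G_STORE %t, %ptr  (1-byte store)
// getTruncStoreByteOffset() returns 2, since the byte stored is byte 2 of
// %wide.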

/// Match a pattern where a wide type scalar value is stored by several narrow
/// stores. Fold it into a single store or a BSWAP and a store if the target
/// supports it.
///
/// Assuming little endian target:
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 0) & 0xFF;
///  p[1] = (val >> 8) & 0xFF;
///  p[2] = (val >> 16) & 0xFF;
///  p[3] = (val >> 24) & 0xFF;
/// =>
///  *((i32)p) = val;
///
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 24) & 0xFF;
///  p[1] = (val >> 16) & 0xFF;
///  p[2] = (val >> 8) & 0xFF;
///  p[3] = (val >> 0) & 0xFF;
/// =>
///  *((i32)p) = BSWAP(val);

bool LoadStoreOpt::mergeTruncStore(GStore &StoreMI,
                                   SmallPtrSetImpl<GStore *> &DeletedStores) {
  LLT MemTy = StoreMI.getMMO().getMemoryType();

  // We only handle merging simple stores of 1-4 bytes.
  if (!MemTy.isScalar())
    return false;
  switch (MemTy.getSizeInBits()) {
  case 8:
  case 16:
  case 32:
    break;
  default:
    return false;
  }
  if (!StoreMI.isSimple())
    return false;

  // We do a simple search for mergeable stores prior to this one.
  // Any potential alias hazard along the way terminates the search.
  SmallVector<GStore *> FoundStores;

  // We're looking for:
  // 1) a (store(trunc(...)))
  // 2) of an LSHR/ASHR of a single wide value, by the appropriate shift to get
  //    the partial value stored.
  // 3) where the offsets form either a little or big-endian sequence.

  auto &LastStore = StoreMI;

  // The single base pointer that all stores must use.
  Register BaseReg;
  int64_t LastOffset;
  if (!mi_match(LastStore.getPointerReg(), *MRI,
                m_GPtrAdd(m_Reg(BaseReg), m_ICst(LastOffset)))) {
    BaseReg = LastStore.getPointerReg();
    LastOffset = 0;
  }

  GStore *LowestIdxStore = &LastStore;
  int64_t LowestIdxOffset = LastOffset;

  Register WideSrcVal;
  auto LowestShiftAmt = getTruncStoreByteOffset(LastStore, WideSrcVal, *MRI);
  if (!LowestShiftAmt)
    return false; // Didn't match a trunc.
  assert(WideSrcVal.isValid());

  LLT WideStoreTy = MRI->getType(WideSrcVal);
  // The wide type might not be a multiple of the memory type, e.g. s48 and s32.
  if (WideStoreTy.getSizeInBits() % MemTy.getSizeInBits() != 0)
    return false;
  const unsigned NumStoresRequired =
      WideStoreTy.getSizeInBits() / MemTy.getSizeInBits();

  SmallVector<int64_t, 8> OffsetMap(NumStoresRequired, INT64_MAX);
  OffsetMap[*LowestShiftAmt] = LastOffset;
  FoundStores.emplace_back(&LastStore);

  const int MaxInstsToCheck = 10;
  int NumInstsChecked = 0;
  for (auto II = ++LastStore.getReverseIterator();
       II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
       ++II) {
    NumInstsChecked++;
    GStore *NewStore;
    if ((NewStore = dyn_cast<GStore>(&*II))) {
      if (NewStore->getMMO().getMemoryType() != MemTy || !NewStore->isSimple())
        break;
    } else if (II->isLoadFoldBarrier() || II->mayLoad()) {
      break;
    } else {
      continue; // This is a safe instruction we can look past.
    }

    Register NewBaseReg;
    int64_t MemOffset;
    // Check we're storing to the same base + some offset.
    if (!mi_match(NewStore->getPointerReg(), *MRI,
                  m_GPtrAdd(m_Reg(NewBaseReg), m_ICst(MemOffset)))) {
      NewBaseReg = NewStore->getPointerReg();
      MemOffset = 0;
    }
    if (BaseReg != NewBaseReg)
      break;

    auto ShiftByteOffset = getTruncStoreByteOffset(*NewStore, WideSrcVal, *MRI);
    if (!ShiftByteOffset)
      break;
    if (MemOffset < LowestIdxOffset) {
      LowestIdxOffset = MemOffset;
      LowestIdxStore = NewStore;
    }

    // Map the offset in the store and the offset in the combined value, and
    // early return if it has been set before.
    if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
        OffsetMap[*ShiftByteOffset] != INT64_MAX)
      break;
    OffsetMap[*ShiftByteOffset] = MemOffset;

    FoundStores.emplace_back(NewStore);
    // Reset the counter since we've found a matching instruction.
    NumInstsChecked = 0;
    if (FoundStores.size() == NumStoresRequired)
      break;
  }

  if (FoundStores.size() != NumStoresRequired) {
    if (FoundStores.size() == 1)
      return false;
    // We didn't find enough stores to merge into the size of the original
    // source value, but we may be able to generate a smaller store if we
    // truncate the source value.
    WideStoreTy = LLT::scalar(FoundStores.size() * MemTy.getScalarSizeInBits());
  }

  unsigned NumStoresFound = FoundStores.size();

  const auto &DL = LastStore.getMF()->getDataLayout();
  auto &C = LastStore.getMF()->getFunction().getContext();

  // Check that a store of the wide type is both allowed and fast on the target
  unsigned Fast = 0;
  bool Allowed = TLI->allowsMemoryAccess(
      C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast);
  if (!Allowed || !Fast)
    return false;

  // Check if the pieces of the value are going to the expected places in
  // memory to merge the stores.
  unsigned NarrowBits = MemTy.getScalarSizeInBits();
  auto checkOffsets = [&](bool MatchLittleEndian) {
    if (MatchLittleEndian) {
      for (unsigned i = 0; i != NumStoresFound; ++i)
        if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
          return false;
    } else { // MatchBigEndian by reversing loop counter.
      for (unsigned i = 0, j = NumStoresFound - 1; i != NumStoresFound;
           ++i, --j)
        if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
          return false;
    }
    return true;
  };

  // Check if the offsets line up for the native data layout of this target.
  bool NeedBswap = false;
  bool NeedRotate = false;
  if (!checkOffsets(DL.isLittleEndian())) {
    // Special-case: check if the byte offsets line up for the opposite endian.
    if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
      NeedBswap = true;
    else if (NumStoresFound == 2 && checkOffsets(DL.isBigEndian()))
      NeedRotate = true;
    else
      return false;
  }

  if (NeedBswap &&
      !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}}, *MF))
    return false;
  if (NeedRotate &&
      !isLegalOrBeforeLegalizer(
          {TargetOpcode::G_ROTR, {WideStoreTy, WideStoreTy}}, *MF))
    return false;

  Builder.setInstrAndDebugLoc(StoreMI);

  if (WideStoreTy != MRI->getType(WideSrcVal))
    WideSrcVal = Builder.buildTrunc(WideStoreTy, WideSrcVal).getReg(0);

  if (NeedBswap) {
    WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0);
  } else if (NeedRotate) {
    assert(WideStoreTy.getSizeInBits() % 2 == 0 &&
           "Unexpected type for rotate");
    auto RotAmt =
        Builder.buildConstant(WideStoreTy, WideStoreTy.getSizeInBits() / 2);
    WideSrcVal =
        Builder.buildRotateRight(WideStoreTy, WideSrcVal, RotAmt).getReg(0);
  }

  Builder.buildStore(WideSrcVal, LowestIdxStore->getPointerReg(),
                     LowestIdxStore->getMMO().getPointerInfo(),
                     LowestIdxStore->getMMO().getAlign());

  // Erase the old stores.
  for (auto *ST : FoundStores) {
    ST->eraseFromParent();
    DeletedStores.insert(ST);
  }
  return true;
}

bool LoadStoreOpt::mergeTruncStoresBlock(MachineBasicBlock &BB) {
  bool Changed = false;
  SmallVector<GStore *, 16> Stores;
  SmallPtrSet<GStore *, 8> DeletedStores;
  // Walk up the block so we can see the most eligible stores.
  for (MachineInstr &MI : llvm::reverse(BB))
    if (auto *StoreMI = dyn_cast<GStore>(&MI))
      Stores.emplace_back(StoreMI);

  for (auto *StoreMI : Stores) {
    if (DeletedStores.count(StoreMI))
      continue;
    if (mergeTruncStore(*StoreMI, DeletedStores))
      Changed = true;
  }
  return Changed;
}

bool LoadStoreOpt::mergeFunctionStores(MachineFunction &MF) {
  bool Changed = false;
  for (auto &BB : MF) {
    Changed |= mergeBlockStores(BB);
    Changed |= mergeTruncStoresBlock(BB);
  }

  // Erase all dead instructions left over by the merging.
  if (Changed) {
    for (auto &BB : MF) {
      for (auto &I : make_early_inc_range(make_range(BB.rbegin(), BB.rend()))) {
        if (isTriviallyDead(I, *MRI))
          I.eraseFromParent();
      }
    }
  }

  return Changed;
}

939

940void LoadStoreOpt::initializeStoreMergeTargetInfo(unsigned AddrSpace) {

941

942

943

944

945 if (LegalStoreSizes.count(AddrSpace)) {

946 assert(LegalStoreSizes[AddrSpace].any());

947 return;

948 }

949

950

952 const auto &LI = *MF->getSubtarget().getLegalizerInfo();

953 const auto &DL = MF->getFunction().getDataLayout();

956

957

964 LegalityQuery Q(TargetOpcode::G_STORE, StoreTys, MemDescrs);

965 LegalizeActionStep ActionStep = LI.getAction(Q);

967 LegalSizes.set(Size);

968 }

969 assert(LegalSizes.any() && "Expected some store sizes to be legal!");

970 LegalStoreSizes[AddrSpace] = LegalSizes;

971}
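// Note (not part of the original source): the cached BitVector is indexed by
// store size in bits; mergeStores() only considers a wider store when the bit
// for that size is set, i.e. when a scalar G_STORE of that width is Legal for
// the address space (in addition to the target's canMergeStoresTo check).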

bool LoadStoreOpt::runOnMachineFunction(MachineFunction &MF) {
  // If the ISel pipeline failed, do not bother running this pass.
  if (MF.getProperties().hasFailedISel())
    return false;

  LLVM_DEBUG(dbgs() << "Begin memory optimizations for: " << MF.getName()
                    << '\n');

  init(MF);
  bool Changed = false;
  Changed |= mergeFunctionStores(MF);

  LegalStoreSizes.clear();
  return Changed;
}
