LLVM: lib/Target/AArch64/AArch64TargetMachine.cpp Source File


#include
#include
#include

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
    cl::desc("Enable the CCMP formation pass"), cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCondBrTuning("aarch64-enable-cond-br-tune",
    cl::desc("Enable the conditional branch tuning pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableAArch64CopyPropagation(
    "aarch64-enable-copy-propagation",
    cl::desc("Enable the copy propagation with AArch64 copy instr"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
    cl::desc("Enable the machine combiner pass"), cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
    cl::desc("Suppress STP for AArch64"), cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar("aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> EnablePromoteConstant("aarch64-enable-promote-const",
    cl::desc("Enable the promote constant pass"), cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableDeadRegisterElimination(
    "aarch64-enable-dead-defs", cl::Hidden,
    cl::desc("Enable the pass that removes dead"
             " definitions and replaces stores to"
             " them with stores to the zero"
             " register"),
    cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
    cl::desc("Enable the load/store pair"
             " optimization pass"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool> EnableEarlyIfConversion("aarch64-enable-early-ifcvt",
    cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true));

static cl::opt<bool> EnableCondOpt("aarch64-enable-condopt",
    cl::desc("Enable the condition optimizer pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
    cl::desc("Enable optimizations on complex GEPs"), cl::init(false));

static cl::opt<bool> EnableSelectOpt("aarch64-select-opt", cl::Hidden,
    cl::desc("Enable select to branch optimizations"), cl::init(true));

static cl::opt<bool> BranchRelaxation("aarch64-enable-branch-relax",
    cl::Hidden, cl::init(true),
    cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));

static cl::opt<cl::boolOrDefault> EnableGlobalMerge(
    "aarch64-enable-global-merge", cl::Hidden,
    cl::desc("Enable the global merge pass"));

static cl::opt<bool> EnableLoopDataPrefetch(
    "aarch64-enable-loop-data-prefetch", cl::Hidden,
    cl::desc("Enable the loop data prefetch pass"), cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));

static cl::opt<bool> EnableSVEIntrinsicOpts(
    "aarch64-enable-sve-intrinsic-opts", cl::Hidden,
    cl::desc("Enable SVE intrinsic opts"), cl::init(true));

static cl::opt<bool> EnableSMEPeepholeOpt("enable-aarch64-sme-peephole-opt",
    cl::init(true), cl::Hidden,
    cl::desc("Perform SME peephole optimization"));

static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableBranchTargets("aarch64-enable-branch-targets",
    cl::Hidden, cl::desc("Enable the AArch64 branch target pass"),
    cl::init(true));

static cl::opt<unsigned> SVEVectorBitsMaxOpt(
    "aarch64-sve-vector-bits-max",
    cl::desc("Assume SVE vector registers are at most this big, "
             "with zero meaning no maximum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<unsigned> SVEVectorBitsMinOpt(
    "aarch64-sve-vector-bits-min",
    cl::desc("Assume SVE vector registers are at least this big, "
             "with zero meaning no minimum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<bool> ForceStreaming(
    "force-streaming",
    cl::desc("Force the use of streaming code for all functions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> ForceStreamingCompatible(
    "force-streaming-compatible",
    cl::desc("Force the use of streaming-compatible code for all functions"),
    cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableHomogeneousPrologEpilog;

static cl::opt<bool> EnableGISelLoadStoreOptPreLegal(
    "aarch64-enable-gisel-ldst-prelegal",
    cl::desc("Enable GlobalISel's pre-legalizer load/store optimization pass"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableGISelLoadStoreOptPostLegal(
    "aarch64-enable-gisel-ldst-postlegal",
    cl::desc("Enable GlobalISel's post-legalizer load/store optimization pass"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> EnableSinkFold("aarch64-enable-sink-fold",
    cl::desc("Enable sinking and folding of instruction copies"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMachinePipeliner("aarch64-enable-pipeliner",
    cl::desc("Enable Machine Pipeliner for AArch64"), cl::init(false),
    cl::Hidden);
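The flags above follow LLVM's standard cl::opt pattern: once a tool linking this backend calls cl::ParseCommandLineOptions, each of them (for example -aarch64-enable-ccmp=false) becomes an ordinary command-line option. A minimal sketch of that pattern, using a hypothetical DemoFlag rather than any option from this file:

#include "llvm/Support/CommandLine.h"

// Hypothetical flag, shown only to illustrate the declaration/lookup pattern
// used by the options above.
static llvm::cl::opt<bool> DemoFlag("demo-flag",
                                    llvm::cl::desc("Illustrative flag"),
                                    llvm::cl::init(true), llvm::cl::Hidden);

int main(int argc, char **argv) {
  llvm::cl::ParseCommandLineOptions(argc, argv);
  return DemoFlag ? 0 : 1; // Reads like a plain bool after parsing.
}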

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64Target() {
  // Registers the AArch64 little-/big-endian, ARM64 and ARM64_32 target
  // machines and initializes the backend passes with the global PassRegistry.
  // ...
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return std::make_unique<AArch64_COFFTargetObjectFile>();

  return std::make_unique<AArch64_ELFTargetObjectFile>();
}

static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::aarch64_32)
      return "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-"
             "n32:64-S128-Fn32";
    return "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-"
           "Fn32";
  }
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p270:32:32-p271:32:32-p272:64:64-p:64:64-i32:32-i64:64-i128:"
           "128-n32:64-S128-Fn32";
  std::string Endian = LittleEndian ? "e" : "E";
  std::string Ptr32 = TT.getEnvironment() == Triple::GNUILP32 ? "-p:32:32" : "";
  return Endian + "-m:e" + Ptr32 +
         "-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-"
         "n32:64-S128-Fn32";
}
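For reference, strings like the ones returned here can be handed straight to llvm::DataLayout to inspect the layout they describe. A small sketch, not part of this file, using the ELF little-endian string from the default case above:

#include "llvm/IR/DataLayout.h"
#include "llvm/Support/raw_ostream.h"

void inspectAArch64Layout() {
  llvm::DataLayout DL(
      "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-"
      "i128:128-n32:64-S128-Fn32");
  llvm::outs() << DL.isLittleEndian() << "\n";        // 1, from the "e" prefix
  llvm::outs() << DL.getPointerSizeInBits(0) << "\n"; // 64-bit generic pointers
}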

static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU) {
  if (CPU.empty() && TT.isArm64e())
    return "apple-a12";
  return CPU;
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           std::optional<Reloc::Model> RM) {
  // AArch64 Darwin and Windows are always PIC.
  if (TT.isOSDarwin() || TT.isOSWindows())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library, so there is no need to promote DynamicNoPIC to PIC.
  if (!RM || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

static CodeModel::Model
getEffectiveAArch64CodeModel(const Triple &TT,
                             std::optional<CodeModel::Model> CM, bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
        *CM != CodeModel::Large) {
      report_fatal_error(
          "Only small, tiny and large code models are allowed on AArch64");
    } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
      report_fatal_error("tiny code model is only supported on ELF");
    return *CM;
  }
  // JITed code needs to be able to refer to globals no matter how far away
  // they are, so default to the large code model, except on Windows where
  // the resulting long branches cannot be relocated.
  if (JIT && !TT.isOSWindows())
    return CodeModel::Large;
  return CodeModel::Small;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, std::optional<Reloc::Model> RM,
    std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT,
    bool LittleEndian)
    : // ...
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region is a call.
    this->Options.TrapUnreachable = true;
  }

  if (this->Options.TLSSize == 0) // default
    this->Options.TLSSize = 24;
  if ((getCodeModel() == CodeModel::Small ||
       getCodeModel() == CodeModel::Kernel) &&
      this->Options.TLSSize > 32)
    // 32 bits allows 4GB TLS size.
    this->Options.TLSSize = 32;
  else if (getCodeModel() == CodeModel::Tiny && this->Options.TLSSize > 24)
    // 24 bits allows 16MB TLS size.
    this->Options.TLSSize = 24;

  // Enable GlobalISel at or below EnableGlobalISelAtO, for configurations
  // GlobalISel supports.
  if (static_cast<int>(getOptLevel()) <= EnableGlobalISelAtO /* && ... */) {
    setGlobalISel(true);
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);

  // AArch64 supports the debug entry values.
  setSupportsDebugEntryValues(true);

  // AArch64 supports fixing up the DWARF unwind information.
  if (!getMCAsmInfo()->usesWindowsCFI())
    setCFIFixup(true);
}

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute TuneAttr = F.getFnAttribute("tune-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  StringRef CPU = CPUAttr.isValid() ? CPUAttr.getValueAsString() : TargetCPU;
  StringRef TuneCPU = TuneAttr.isValid() ? TuneAttr.getValueAsString() : CPU;
  StringRef FS = FSAttr.isValid() ? FSAttr.getValueAsString() : TargetFS;
  bool HasMinSize = F.hasMinSize();

  bool IsStreaming = ForceStreaming ||
                     F.hasFnAttribute("aarch64_pstate_sm_enabled") ||
                     F.hasFnAttribute("aarch64_pstate_sm_body");
  bool IsStreamingCompatible = ForceStreamingCompatible ||
                               F.hasFnAttribute("aarch64_pstate_sm_compatible");

  unsigned MinSVEVectorSize = 0;
  unsigned MaxSVEVectorSize = 0;
  if (F.hasFnAttribute(Attribute::VScaleRange)) {
    ConstantRange CR = getVScaleRange(&F, 64);
    MinSVEVectorSize = CR.getUnsignedMin().getZExtValue() * 128;
    MaxSVEVectorSize = CR.getUnsignedMax().getZExtValue() * 128;
  } else {
    MinSVEVectorSize = SVEVectorBitsMinOpt;
    MaxSVEVectorSize = SVEVectorBitsMaxOpt;
  }

  assert(MinSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert(MaxSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert((MaxSVEVectorSize >= MinSVEVectorSize || MaxSVEVectorSize == 0) &&
         "Minimum SVE vector size should not be larger than its maximum!");

  // Sanitize user input in case of no asserts.
  if (MaxSVEVectorSize != 0) {
    MinSVEVectorSize = std::min(MinSVEVectorSize, MaxSVEVectorSize);
    MaxSVEVectorSize = std::max(MinSVEVectorSize, MaxSVEVectorSize);
  }

  SmallString<512> Key;
  raw_svector_ostream(Key) << "SVEMin" << MinSVEVectorSize << "SVEMax"
                           << MaxSVEVectorSize << "IsStreaming=" << IsStreaming
                           << "IsStreamingCompatible=" << IsStreamingCompatible
                           << CPU << TuneCPU << FS
                           << "HasMinSize=" << HasMinSize;

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<AArch64Subtarget>(
        TargetTriple, CPU, TuneCPU, FS, *this, isLittle, MinSVEVectorSize,
        MaxSVEVectorSize, IsStreaming, IsStreamingCompatible, HasMinSize);
  }

  assert((!IsStreaming || I->hasSME()) && "Expected SME to be available");

  return I.get();
}
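The SVE width clamping above is driven by the vscale_range function attribute that frontends attach to IR. A hedged sketch of setting that attribute through the C++ API (the helper name is illustrative, not from this file); with min and max both 2, the logic above derives an exact 256-bit SVE vector width:

#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"

// Tag a function so getSubtargetImpl() sees vscale_range(2,2),
// i.e. 2 x 128 = 256-bit SVE vectors.
void forceSve256(llvm::Function &F) {
  F.addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
      F.getContext(), /*MinValue=*/2, /*MaxValue=*/2));
}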

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, std::optional<Reloc::Model> RM,
    std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT,
                           /*IsLittleEndian=*/true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, std::optional<Reloc::Model> RM,
    std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT,
                           /*IsLittleEndian=*/false) {}
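These little-endian and big-endian subclasses are what the target registry instantiates. A minimal sketch, under the assumption of the standard TargetRegistry API in recent LLVM releases (not code from this file; triple and CPU strings are just examples), of how an embedder typically reaches these constructors:

#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

llvm::TargetMachine *makeAArch64TargetMachine() {
  llvm::InitializeAllTargetInfos();
  llvm::InitializeAllTargets();
  llvm::InitializeAllTargetMCs();

  std::string Error;
  std::string TripleStr = "aarch64-unknown-linux-gnu";
  const llvm::Target *T = llvm::TargetRegistry::lookupTarget(TripleStr, Error);
  if (!T)
    return nullptr;
  // Dispatches to AArch64leTargetMachine for this little-endian triple.
  return T->createTargetMachine(TripleStr, "generic", "",
                                llvm::TargetOptions(), llvm::Reloc::PIC_);
}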

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOptLevel::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
    setEnableSinkAndFold(EnableSinkFold);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    // Enable macro fusion for this subtarget.
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMI *DAG =
        new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
                          /*RemoveKillFlags=*/true);
    if (ST.hasFusion()) {
      // Run macro fusion after RA again, since literals are expanded from
      // pseudos then (see addPreSched2()).
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return DAG;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  void addCodeGenPrepare() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPostBBSections() override;
  void addPreEmitPass2() override;
  bool addRegAssignAndRewriteOptimized() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

void AArch64TargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
  PB.registerLateLoopOptimizationsEPCallback(
      [=](LoopPassManager &LPM, OptimizationLevel Level) {
        // ...
      });
  PB.registerPipelineEarlySimplificationEPCallback(
      [=](ModulePassManager &MPM, OptimizationLevel Level,
          ThinOrFullLTOPhase) {
        // ...
      });
}

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandLegacyPass());

  // ...

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(SimplifyCFGOptions()
                                            .forwardSwitchCondToPhi(true)
                                            .convertSwitchRangeToICmp(true)
                                            .convertSwitchToLookupTable(true)
                                            .needCanonicalLoops(false)
                                            .hoistCommonInsts(true)
                                            .sinkCommonInsts(true)));

  // The remaining IR passes added here (loop data prefetching, complex GEP
  // splitting, select-to-branch optimization, stack tagging, interleaved
  // access matching, SME ABI lowering, ...) are gated on the opt level and
  // the Enable* flags declared above.
  // ...

  // Add Control Flow Guard checks; Arm64EC uses its own call lowering.
  if (TM->getTargetTriple().isOSWindows()) {
    if (TM->getTargetTriple().isWindowsArm64EC())
      addPass(createAArch64Arm64ECCallLoweringPass());
    else
      addPass(createCFGuardCheckPass());
  }

  if (TM->Options.JMCInstrument)
    addPass(createJMCInstrumenterPass());
}

bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  if ((TM->getOptLevel() != CodeGenOptLevel::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize =
        (TM->getOptLevel() < CodeGenOptLevel::Aggressive) &&
        (EnableGlobalMerge == cl::BOU_UNSET);

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for
    // performance.
    bool MergeExternalByDefault = TM->getTargetTriple().isOSBinFormatMachO();
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;

    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

void AArch64PassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createTypePromotionLegacyPass());
  TargetPassConfig::addCodeGenPrepare();
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  if (getOptLevel() == CodeGenOptLevel::None) {
    addPass(createAArch64O0PreLegalizerCombiner());
    addPass(new Localizer());
  } else {
    addPass(createAArch64PreLegalizerCombiner());
    addPass(new Localizer());
    if (EnableGISelLoadStoreOptPreLegal)
      addPass(new LoadStoreOpt());
  }
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void AArch64PassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  if (!IsOptNone) {
    addPass(createAArch64PostLegalizerCombiner(IsOptNone));
    if (EnableGISelLoadStoreOptPostLegal)
      addPass(new LoadStoreOpt());
  }
  addPass(createAArch64PostLegalizerLowering());
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64PostSelectOptimize());
  return false;
}

void AArch64PassConfig::addMachineSSAOptimization() {
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableSMEPeepholeOpt)
    addPass(createSMEPeepholeOptPass());

  // Run the default MachineSSAOptimization first.
  TargetPassConfig::addMachineSSAOptimization();

  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64MIPeepholeOptPass());
}

bool AArch64PassConfig::addILPOpts() {
  // Conditionally adds the condition optimizer, CCMP formation, machine
  // combiner, conditional-branch tuning, early if-conversion, store-pair
  // suppression and related ILP passes (see the Enable* flags above).
  // ...
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerLegacyID);
  }
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableMachinePipeliner)
    addPass(&MachinePipelinerID);
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOptLevel::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Lower homogeneous frame instructions.
  if (EnableHomogeneousPrologEpilog)
    addPass(createAArch64LowerHomogeneousPrologEpilogPass());
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }
  // Emit KCFI checks for indirect calls.
  addPass(createKCFIPass());

  // The speculation hardening pass destroys dominator tree and natural loop
  // info, which is needed by FalkorHWPFFix and later passes, so run it first.
  addPass(createAArch64SpeculationHardeningPass());

  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive &&
      EnableAArch64CopyPropagation)
    addPass(createMachineCopyPropagationPass(true));

  addPass(createAArch64A53Fix835769());

  if (TM->getTargetTriple().isOSWindows()) {
    // Identify valid longjmp targets for Windows Control Flow Guard.
    addPass(createCFGuardLongjmpPass());
    // Identify valid eh continuation targets for Windows EHCont Guard.
    addPass(createEHContGuardCatchretPass());
  }

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}

void AArch64PassConfig::addPostBBSections() {
  addPass(createAArch64SLSHardeningPass());
  addPass(createAArch64PointerAuthPass());
  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());
}

void AArch64PassConfig::addPreEmitPass2() {
  // Unpack machine-instruction bundles created by earlier passes before
  // emission.
  addPass(createUnpackMachineBundles(nullptr));
}

bool AArch64PassConfig::addRegAssignAndRewriteOptimized() {
  addPass(createAArch64PostCoalescerPass());
  return TargetPassConfig::addRegAssignAndRewriteOptimized();
}

MachineFunctionInfo *AArch64TargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return AArch64FunctionInfo::create<AArch64FunctionInfo>(
      Allocator, F, static_cast<const AArch64Subtarget *>(STI));
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::AArch64FunctionInfo();
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<AArch64FunctionInfo>();
  return new yaml::AArch64FunctionInfo(*MFI);
}

bool AArch64TargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI = static_cast<const yaml::AArch64FunctionInfo &>(MFI);
  MachineFunction &MF = PFS.MF;
  MF.getInfo<AArch64FunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}

cl::opt< bool > EnableHomogeneousPrologEpilog("homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " "optimization (default = off)"))

static cl::opt< bool > EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden, cl::desc("Enable the AArch64 branch target pass"), cl::init(true))

static cl::opt< bool > EnableSVEIntrinsicOpts("aarch64-enable-sve-intrinsic-opts", cl::Hidden, cl::desc("Enable SVE intrinsic opts"), cl::init(true))

static cl::opt< bool > EnableAArch64CopyPropagation("aarch64-enable-copy-propagation", cl::desc("Enable the copy propagation with AArch64 copy instr"), cl::init(true), cl::Hidden)

static cl::opt< bool > BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true), cl::desc("Relax out of range conditional branches"))

static cl::opt< bool > EnablePromoteConstant("aarch64-enable-promote-const", cl::desc("Enable the promote constant pass"), cl::init(true), cl::Hidden)

static cl::opt< bool > EnableCondBrTuning("aarch64-enable-cond-br-tune", cl::desc("Enable the conditional branch tuning pass"), cl::init(true), cl::Hidden)

static cl::opt< bool > EnableSinkFold("aarch64-enable-sink-fold", cl::desc("Enable sinking and folding of instruction copies"), cl::init(true), cl::Hidden)

static cl::opt< bool > EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden, cl::desc("Enable the pass that removes dead" " definitions and replaces stores to" " them with stores to the zero" " register"), cl::init(true))

static cl::opt< bool > EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden, cl::desc("Enable optimizations on complex GEPs"), cl::init(false))

static cl::opt< bool > EnableSelectOpt("aarch64-select-opt", cl::Hidden, cl::desc("Enable select to branch optimizations"), cl::init(true))

static cl::opt< bool > EnableLoadStoreOpt("aarch64-enable-ldst-opt", cl::desc("Enable the load/store pair" " optimization pass"), cl::init(true), cl::Hidden)

cl::opt< bool > EnableHomogeneousPrologEpilog

static cl::opt< bool > EnableGISelLoadStoreOptPostLegal("aarch64-enable-gisel-ldst-postlegal", cl::desc("Enable GlobalISel's post-legalizer load/store optimization pass"), cl::init(false), cl::Hidden)

static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU)

static cl::opt< unsigned > SVEVectorBitsMinOpt("aarch64-sve-vector-bits-min", cl::desc("Assume SVE vector registers are at least this big, " "with zero meaning no minimum size is assumed."), cl::init(0), cl::Hidden)

static cl::opt< bool > EnableMCR("aarch64-enable-mcr", cl::desc("Enable the machine combiner pass"), cl::init(true), cl::Hidden)

static cl::opt< cl::boolOrDefault > EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden, cl::desc("Enable the global merge pass"))

static cl::opt< bool > EnableStPairSuppress("aarch64-enable-stp-suppress", cl::desc("Suppress STP for AArch64"), cl::init(true), cl::Hidden)

static CodeModel::Model getEffectiveAArch64CodeModel(const Triple &TT, std::optional< CodeModel::Model > CM, bool JIT)

static cl::opt< bool > EnableCondOpt("aarch64-enable-condopt", cl::desc("Enable the condition optimizer pass"), cl::init(true), cl::Hidden)

static cl::opt< bool > ForceStreaming("force-streaming", cl::desc("Force the use of streaming code for all functions"), cl::init(false), cl::Hidden)

static cl::opt< bool > EnableCollectLOH("aarch64-enable-collect-loh", cl::desc("Enable the pass that emits the linker optimization hints (LOH)"), cl::init(true), cl::Hidden)

static cl::opt< bool > EnableGISelLoadStoreOptPreLegal("aarch64-enable-gisel-ldst-prelegal", cl::desc("Enable GlobalISel's pre-legalizer load/store optimization pass"), cl::init(true), cl::Hidden)

static cl::opt< bool > EnableRedundantCopyElimination("aarch64-enable-copyelim", cl::desc("Enable the redundant copy elimination pass"), cl::init(true), cl::Hidden)

static cl::opt< bool > EnableAtomicTidy("aarch64-enable-atomic-cfg-tidy", cl::Hidden, cl::desc("Run SimplifyCFG after expanding atomic operations" " to make use of cmpxchg flow-based information"), cl::init(true))

static cl::opt< bool > EnableAdvSIMDScalar("aarch64-enable-simd-scalar", cl::desc("Enable use of AdvSIMD scalar integer instructions"), cl::init(false), cl::Hidden)

static cl::opt< int > EnableGlobalISelAtO("aarch64-enable-global-isel-at-O", cl::Hidden, cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"), cl::init(0))

static cl::opt< bool > EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden, cl::desc("Enable the loop data prefetch pass"), cl::init(true))

static cl::opt< bool > EnableSMEPeepholeOpt("enable-aarch64-sme-peephole-opt", cl::init(true), cl::Hidden, cl::desc("Perform SME peephole optimization"))

LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64Target()

static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))

static cl::opt< bool > EnableMachinePipeliner("aarch64-enable-pipeliner", cl::desc("Enable Machine Pipeliner for AArch64"), cl::init(false), cl::Hidden)

static cl::opt< bool > EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix", cl::init(true), cl::Hidden)

static cl::opt< unsigned > SVEVectorBitsMaxOpt("aarch64-sve-vector-bits-max", cl::desc("Assume SVE vector registers are at most this big, " "with zero meaning no maximum size is assumed."), cl::init(0), cl::Hidden)

static cl::opt< bool > ForceStreamingCompatible("force-streaming-compatible", cl::desc("Force the use of streaming-compatible code for all functions"), cl::init(false), cl::Hidden)

static cl::opt< bool > EnableCompressJumpTables("aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true), cl::desc("Use smallest entry possible for jump tables"))

static cl::opt< bool > EnableCCMP("aarch64-enable-ccmp", cl::desc("Enable the CCMP formation pass"), cl::init(true), cl::Hidden)

This file defines a TargetTransformInfo::Concept conforming object specific to the AArch64 target machine.

This file contains the simple types necessary to represent the attributes associated with functions a...

Provides analysis for continuously CSEing during GISel passes.

#define LLVM_EXTERNAL_VISIBILITY


static cl::opt< bool > EnableGlobalMerge("enable-global-merge", cl::Hidden, cl::desc("Enable the global merge pass"), cl::init(true))

This file declares the IRTranslator pass.

static std::string computeDataLayout()



This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...


Target-Independent Code Generator Pass Configuration Options pass.

This pass exposes codegen information to IR-level passes.

static std::unique_ptr< TargetLoweringObjectFile > createTLOF()

AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...

StringMap< std::unique_ptr< AArch64Subtarget > > SubtargetMap

MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override

Create the target's instance of MachineFunctionInfo.

void registerPassBuilderCallbacks(PassBuilder &PB) override

Allow the target to modify the pass pipeline.

const AArch64Subtarget * getSubtargetImpl() const =delete

yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override

Allocate and return a default initialized instance of the YAML representation for the MachineFunction...

~AArch64TargetMachine() override

yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override

Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.

bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override

Parse out the target's MachineFunctionInfo from the YAML representation.

TargetPassConfig * createPassConfig(PassManagerBase &PM) override

Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...

void reset() override

Reset internal state.

AArch64TargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT, bool IsLittleEndian)

Create an AArch64 architecture model.

TargetTransformInfo getTargetTransformInfo(const Function &F) const override

Get a TargetTransformInfo implementation for the target.

AArch64beTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)

AArch64leTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)

uint64_t getZExtValue() const

Get zero extended value.

StringRef getValueAsString() const

Return the attribute's value as a string.

bool isValid() const

Return true if the attribute is any kind of attribute.

Allocate memory in an ever growing pool, as if by bump-pointer.

implements a set of functionality in the TargetMachine class for targets that make use of the indepen...

This class represents a range of values.

APInt getUnsignedMin() const

Return the smallest unsigned value contained in the ConstantRange.

APInt getUnsignedMax() const

Return the largest unsigned value contained in the ConstantRange.

Lightweight error class with error context and mandatory checking.

This pass is responsible for selecting generic machine instructions to target-specific instructions.

This pass implements the localization mechanism described at the top of this file.

Pass to replace calls to ifuncs with indirect calls.

Ty * getInfo()

getInfo - Keep track of various per-function pieces of information for backends that would like to do...

This class provides access to building LLVM's passes.

void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel, ThinOrFullLTOPhase)> &C)

Register a callback for a default optimizer pipeline extension point.

void registerLateLoopOptimizationsEPCallback(const std::function< void(LoopPassManager &, OptimizationLevel)> &C)

Register a callback for a default optimizer pipeline extension point.

LLVM_ATTRIBUTE_MINSIZE std::enable_if_t< is_detected< HasRunOnLoopT, PassT >::value > addPass(PassT &&Pass)

LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)

static PassRegistry * getPassRegistry()

getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...

This pass implements the reg bank selector pass used in the GlobalISel pipeline.

Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...

Represents a range in source code.

A ScheduleDAG for scheduling lists of MachineInstr.

ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...

ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...

SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...

StringRef - Represent a constant reference to a string, i.e.

constexpr bool empty() const

empty - Check if the string is empty.

CodeGenOptLevel getOptLevel() const

Returns the optimization level: None, Less, Default, or Aggressive.

void setSupportsDebugEntryValues(bool Enable)

Triple TargetTriple

Triple string, CPU name, and target feature strings the TargetMachine instance is created with.

const Triple & getTargetTriple() const

void setMachineOutliner(bool Enable)

void setCFIFixup(bool Enable)

void setSupportsDefaultOutlining(bool Enable)

void setGlobalISelAbort(GlobalISelAbortMode Mode)

std::unique_ptr< const MCSubtargetInfo > STI

void setGlobalISel(bool Enable)

CodeModel::Model getCodeModel() const

Returns the code model.

void resetTargetOptions(const Function &F) const

Reset the target options based on the function's attributes.

const MCAsmInfo * getMCAsmInfo() const

Return target specific asm information.

unsigned TLSSize

Bit size of immediate TLS offsets (0 == use the default).

unsigned NoTrapAfterNoreturn

Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...

unsigned TrapUnreachable

Emit target-specific trap instruction for 'unreachable' IR instructions.

Target-Independent Code Generator Pass Configuration Options.

virtual void addCodeGenPrepare()

Add pass to prepare the LLVM IR for code generation.

virtual void addIRPasses()

Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...

virtual void addMachineSSAOptimization()

addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.

virtual bool addRegAssignAndRewriteOptimized()

TargetSubtargetInfo - Generic base class for all target subtargets.

This pass provides access to the codegen interfaces that are needed for IR-level transformations.

Target - Wrapper for Target specific information.

Triple - Helper class for working with autoconf configuration names.

PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...

A raw_ostream that writes to an SmallVector or SmallString.

Interfaces for registering analysis passes, producing common pass manager configurations,...

@ C

The default llvm calling convention, compatible with C.

initializer< Ty > init(const Ty &Val)

This is an optimization pass for GlobalISel generic memory operations.

FunctionPass * createAArch64PreLegalizerCombiner()

void initializeLDTLSCleanupPass(PassRegistry &)

FunctionPass * createCFGSimplificationPass(SimplifyCFGOptions Options=SimplifyCFGOptions(), std::function< bool(const Function &)> Ftor=nullptr)

FunctionPass * createSMEABIPass()

void initializeAArch64A57FPLoadBalancingPass(PassRegistry &)

FunctionPass * createAArch64PostSelectOptimize()

void initializeAArch64SpeculationHardeningPass(PassRegistry &)

void initializeAArch64PostLegalizerLoweringPass(PassRegistry &)

FunctionPass * createAArch64RedundantCopyEliminationPass()

FunctionPass * createAArch64StackTaggingPreRAPass()

FunctionPass * createTypePromotionLegacyPass()

Create IR Type Promotion pass.

void initializeAArch64PostLegalizerCombinerPass(PassRegistry &)

FunctionPass * createAArch64MIPeepholeOptPass()

void initializeAArch64AdvSIMDScalarPass(PassRegistry &)

void initializeAArch64PostCoalescerPass(PassRegistry &)

FunctionPass * createSelectOptimizePass()

This pass converts conditional moves to conditional jumps when profitable.

Pass * createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset, bool OnlyOptimizeForSize=false, bool MergeExternalByDefault=false, bool MergeConstantByDefault=false, bool MergeConstAggressiveByDefault=false)

GlobalMerge - This pass merges internal (by default) globals into structs to enable reuse of a base p...

FunctionPass * createAArch64PostCoalescerPass()

void initializeAArch64PromoteConstantPass(PassRegistry &)

FunctionPass * createFalkorMarkStridedAccessesPass()

Target & getTheAArch64beTarget()

FunctionPass * createAArch64PointerAuthPass()

FunctionPass * createFalkorHWPFFixPass()

char & PostRASchedulerID

PostRAScheduler - This pass performs post register allocation scheduling.

FunctionPass * createAArch64O0PreLegalizerCombiner()

FunctionPass * createAArch64A57FPLoadBalancing()

FunctionPass * createAArch64CondBrTuning()

std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)

void initializeSMEABIPass(PassRegistry &)

char & PostMachineSchedulerID

PostMachineScheduler - This pass schedules machine instructions postRA.

char & PeepholeOptimizerLegacyID

PeepholeOptimizer - This pass performs peephole optimizations - like extension and comparison elimina...

ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)

Create the standard converging machine scheduler.

Target & getTheAArch64leTarget()

FunctionPass * createAArch64DeadRegisterDefinitions()

char & EarlyIfConverterLegacyID

EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.

FunctionPass * createSMEPeepholeOptPass()

FunctionPass * createAArch64PostLegalizerLowering()

ThinOrFullLTOPhase

This enumerates the LLVM full LTO or ThinLTO optimization phases.

ModulePass * createJMCInstrumenterPass()

JMC instrument pass.

char & MachineCombinerID

This pass performs instruction combining using trace metrics to estimate critical-path and resource d...

static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)

FunctionPass * createAArch64CompressJumpTablesPass()

Target & getTheAArch64_32Target()

FunctionPass * createAArch64ConditionalCompares()

char & BranchRelaxationPassID

BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...

void initializeFalkorMarkStridedAccessesLegacyPass(PassRegistry &)

void initializeAArch64ExpandPseudoPass(PassRegistry &)

void initializeAArch64DeadRegisterDefinitionsPass(PassRegistry &)

void initializeAArch64StackTaggingPass(PassRegistry &)

FunctionPass * createAArch64ExpandPseudoPass()

Returns an instance of the pseudo instruction expansion pass.

FunctionPass * createKCFIPass()

Lowers KCFI operand bundles for indirect calls.

std::unique_ptr< ScheduleDAGMutation > createAArch64MacroFusionDAGMutation()

Note that you have to add: DAG.addMutation(createAArch64MacroFusionDAGMutation()); to AArch64PassConf...

FunctionPass * createComplexDeinterleavingPass(const TargetMachine *TM)

This pass implements generation of target-specific intrinsics to support handling of complex number a...

ModulePass * createAArch64Arm64ECCallLoweringPass()

std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)

If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...

FunctionPass * createLoopDataPrefetchPass()

FunctionPass * createAArch64SIMDInstrOptPass()

Returns an instance of the high cost ASIMD instruction replacement optimization pass.

void initializeSMEPeepholeOptPass(PassRegistry &)

void report_fatal_error(Error Err, bool gen_crash_diag=true)

Report a serious error, calling any installed error handler.

FunctionPass * createAArch64StorePairSuppressPass()

FunctionPass * createAArch64ConditionOptimizerPass()

ModulePass * createSVEIntrinsicOptsPass()

void initializeAArch64CompressJumpTablesPass(PassRegistry &)

void initializeAArch64SLSHardeningPass(PassRegistry &)

FunctionPass * createAArch64CollectLOHPass()

ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)

Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...

CodeGenOptLevel

Code generation optimization level.

FunctionPass * createAArch64LoadStoreOptimizationPass()

createAArch64LoadStoreOptimizationPass - returns an instance of the load / store optimization pass.

void initializeAArch64StackTaggingPreRAPass(PassRegistry &)

void initializeAArch64PreLegalizerCombinerPass(PassRegistry &)

Target & getTheARM64_32Target()

FunctionPass * createCFGuardLongjmpPass()

Creates CFGuard longjmp target identification pass.

FunctionPass * createAArch64PostLegalizerCombiner(bool IsOptNone)

void initializeAArch64StorePairSuppressPass(PassRegistry &)

void initializeAArch64LowerHomogeneousPrologEpilogPass(PassRegistry &)

FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)

FunctionPass * createInterleavedAccessPass()

InterleavedAccess Pass - This pass identifies and matches interleaved memory accesses to target speci...

void initializeGlobalISel(PassRegistry &)

Initialize all passes linked into the GlobalISel library.

FunctionPass * createAArch64ISelDag(AArch64TargetMachine &TM, CodeGenOptLevel OptLevel)

createAArch64ISelDag - This pass converts a legalized DAG into a AArch64-specific DAG,...

char & MachinePipelinerID

This pass performs software pipelining on machine instructions.

void initializeAArch64MIPeepholeOptPass(PassRegistry &)

FunctionPass * createAArch64SLSHardeningPass()

FunctionPass * createAArch64BranchTargetsPass()

Target & getTheARM64Target()

std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)

If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...

void initializeFalkorHWPFFixPass(PassRegistry &)

FunctionPass * createUnpackMachineBundles(std::function< bool(const MachineFunction &)> Ftor)

void initializeKCFIPass(PassRegistry &)

void initializeAArch64BranchTargetsPass(PassRegistry &)

FunctionPass * createCFGuardCheckPass()

Insert Control FLow Guard checks on indirect function calls.

void initializeAArch64A53Fix835769Pass(PassRegistry &)

ModulePass * createAArch64LowerHomogeneousPrologEpilogPass()

void initializeAArch64LoadStoreOptPass(PassRegistry &)

void initializeAArch64SIMDInstrOptPass(PassRegistry &)

void initializeAArch64PostSelectOptimizePass(PassRegistry &)

void initializeAArch64CollectLOHPass(PassRegistry &)

FunctionPass * createAArch64StackTaggingPass(bool IsOptNone)

void initializeAArch64O0PreLegalizerCombinerPass(PassRegistry &)

void initializeAArch64ConditionOptimizerPass(PassRegistry &)

void initializeAArch64ConditionalComparesPass(PassRegistry &)

FunctionPass * createAtomicExpandLegacyPass()

AtomicExpandPass - At IR level this pass replace atomic instructions with __atomic_* library calls,...

FunctionPass * createAArch64CleanupLocalDynamicTLSPass()

FunctionPass * createEHContGuardCatchretPass()

Creates EHContGuard catchret target identification pass.

ModulePass * createAArch64PromoteConstantPass()

FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)

MachineFunctionPass * createMachineCopyPropagationPass(bool UseCopyInstr)

FunctionPass * createAArch64AdvSIMDScalar()

void initializeAArch64DAGToDAGISelLegacyPass(PassRegistry &)

FunctionPass * createAArch64SpeculationHardeningPass()

Returns an instance of the AArch64 speculation hardening pass.

void initializeSVEIntrinsicOptsPass(PassRegistry &)

void initializeAArch64PointerAuthPass(PassRegistry &)

void initializeAArch64RedundantCopyEliminationPass(PassRegistry &)

FunctionPass * createInterleavedLoadCombinePass()

InterleavedLoadCombines Pass - This pass identifies interleaved loads and combines them into wide loa...

FunctionPass * createAArch64A53Fix835769()

MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...

MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...

RegisterTargetMachine - Helper template for registering a target machine implementation,...

Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.