LLVM: lib/Target/ARM/ARMTargetTransformInfo.h Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17#ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H

18#define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H

19

29#include

30

31namespace llvm {

32

41

51

52

56

60

61 friend BaseT;

62

65

66

67

68

69

70

71

73 ARM::FeatureVFP2, ARM::FeatureVFP3, ARM::FeatureNEON, ARM::FeatureThumb2,

74 ARM::FeatureFP16, ARM::FeatureVFP4, ARM::FeatureFPARMv8,

75 ARM::FeatureFullFP16, ARM::FeatureFP16FML, ARM::FeatureHWDivThumb,

76 ARM::FeatureHWDivARM, ARM::FeatureDB, ARM::FeatureV7Clrex,

77 ARM::FeatureAcquireRelease, ARM::FeatureSlowFPBrcc,

78 ARM::FeaturePerfMon, ARM::FeatureTrustZone, ARM::Feature8MSecExt,

79 ARM::FeatureCrypto, ARM::FeatureCRC, ARM::FeatureRAS,

80 ARM::FeatureFPAO, ARM::FeatureFuseAES, ARM::FeatureZCZeroing,

81 ARM::FeatureProfUnpredicate, ARM::FeatureSlowVGETLNi32,

82 ARM::FeatureSlowVDUP32, ARM::FeaturePreferVMOVSR,

83 ARM::FeaturePrefISHSTBarrier, ARM::FeatureMuxedUnits,

84 ARM::FeatureSlowOddRegister, ARM::FeatureSlowLoadDSubreg,

85 ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx,

86 ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs,

87 ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign,

88 ARM::FeatureHasSlowFPVMLx, ARM::FeatureHasSlowFPVFMx,

89 ARM::FeatureVMLxForwarding, ARM::FeaturePref32BitThumb,

90 ARM::FeatureAvoidPartialCPSR, ARM::FeatureCheapPredicableCPSR,

91 ARM::FeatureAvoidMOVsShOp, ARM::FeatureHasRetAddrStack,

92 ARM::FeatureHasNoBranchPredictor, ARM::FeatureDSP, ARM::FeatureMP,

93 ARM::FeatureVirtualization, ARM::FeatureMClass, ARM::FeatureRClass,

94 ARM::FeatureAClass, ARM::FeatureStrictAlign, ARM::FeatureLongCalls,

95 ARM::FeatureExecuteOnly, ARM::FeatureReserveR9, ARM::FeatureNoMovt,

96 ARM::FeatureNoNegativeImmediates

97 };

98

99 const ARMSubtarget *getST() const { return ST; }

101

102public:

104 : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),

105 TLI(ST->getTargetLowering()) {}

106

108 const Function *Callee) const override;

109

111

114

115

116

117

119 return !ST->isTargetDarwin() && !ST->hasMVEFloatOps();

120 }

121

122 std::optional<Instruction *>

126 APInt &UndefElts2, APInt &UndefElts3,

128 SimplifyAndSetOp) const override;

129

130

131

132

134 const APInt &Imm,

135 Type *Ty) const override;

136

140

144 Instruction *Inst = nullptr) const override;

145

146

147

148

149

150

152 bool Vector = (ClassID == 1);

154 if (ST->hasNEON())

155 return 16;

156 if (ST->hasMVEIntegerOps())

157 return 8;

158 return 0;

159 }

160

161 if (ST->isThumb1Only())

162 return 8;

163 return 13;

164 }

165

168 switch (K) {

172 if (ST->hasNEON())

174 if (ST->hasMVEIntegerOps())

179 }

181 }

182

184 return ST->getMaxInterleaveFactor();

185 }

186

188

189 bool

193

194 bool

200

202 Align Alignment) const override {

203

204

205

206

207 return true;

208 }

209

211 Align Alignment) const override {

213 }

214

216

220

222

224 return ST->getMaxInlineSizeThreshold();

225 }

226

228

233 const Instruction *CxtI = nullptr) const override;

234

236

238

242

244 const Instruction *I = nullptr) const override;

245

249 const Instruction *I = nullptr) const override;

250

256 const Instruction *I = nullptr) const override;

257

261 unsigned Index, const Value *Op0,

262 const Value *Op1) const override;

263

267

273 const Instruction *CxtI = nullptr) const override;

274

276 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,

279 const Instruction *I = nullptr) const override;

280

284

287

289 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef Indices,

291 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override;

292

295

298 std::optional FMF,

302 VectorType *ValTy, std::optional FMF,

306 VectorType *ValTy,

308

312

316

317

318

319

320

322 StackOffset BaseOffset, bool HasBaseReg,

323 int64_t Scale,

324 unsigned AddrSpace) const override;

325

329 AssumptionCache &AC, TargetLibraryInfo *LibInfo,

330 HardwareLoopInfo &HWLoopInfo) const override;

334 OptimizationRemarkEmitter *ORE) const override;

335

338

342

343

344

345 if (ST->isROPI() || ST->isRWPI())

346 return C->needsDynamicRelocation();

347

348 return true;

349 }

350

352

355

358

359

360};

361

362

363

364

367 "Only possible block sizes for VREV are: 16, 32, 64");

368

370 if (EltSz != 8 && EltSz != 16 && EltSz != 32)

371 return false;

372

373 unsigned BlockElts = M[0] + 1;

374

375 if (M[0] < 0)

377

379 return false;

380

381 for (unsigned i = 0, e = M.size(); i < e; ++i) {

382 if (M[i] < 0)

383 continue;

384 if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))

385 return false;

386 }

387

388 return true;

389}

390

391}

392

393#endif

assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")

This file provides a helper that implements much of the TTI interface in terms of the target-independent code generator.

static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))

const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]

uint64_t IntrinsicInst * II

static const int BlockSize

This pass exposes codegen information to IR-level passes.

Class for arbitrary precision integers.

InstructionCost getGatherScatterOpCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const

TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override

Definition ARMTargetTransformInfo.h:167

InstructionCost getAddressComputationCost(Type *Val, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const override

TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const override

bool isFPVectorizationPotentiallyUnsafe() const override

Floating-point computation using ARMv8 AArch32 Advanced SIMD instructions remains unchanged from ARMv7.

Definition ARMTargetTransformInfo.h:118

InstructionCost getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const

InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override

InstructionCost getMemcpyCost(const Instruction *I) const override

bool isLegalMaskedScatter(Type *Ty, Align Alignment) const override

Definition ARMTargetTransformInfo.h:217

bool maybeLoweredToCall(Instruction &I) const

bool preferInLoopReduction(RecurKind Kind, Type *Ty) const override

InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override

InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *ValTy, TTI::TargetCostKind CostKind) const override

InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override

InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const override

bool hasArmWideBranch(bool Thumb) const override

bool shouldExpandReduction(const IntrinsicInst *II) const override

Definition ARMTargetTransformInfo.h:239

bool shouldBuildLookupTablesForConstant(Constant *C) const override

Definition ARMTargetTransformInfo.h:341

InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override

int getNumMemOps(const IntrinsicInst *I) const

Given a memcpy/memset/memmove instruction, return the number of memory operations performed, via querying findOptimalMemOpLowering.

InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override

InstructionCost getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty) const override

bool isLoweredToCall(const Function *F) const override

InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override

bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override

Check if sinking I's operands to I's basic block is profitable, because the operands can be folded into a target instruction during instruction selection.

uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override

Definition ARMTargetTransformInfo.h:223

bool isLegalMaskedStore(Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::MaskKind MaskKind=TTI::MaskKind::VariableOrConstantMask) const override

Definition ARMTargetTransformInfo.h:195

bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) const override

Definition ARMTargetTransformInfo.h:210

InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override

InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override

std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const override

bool isLegalMaskedLoad(Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::MaskKind MaskKind=TTI::MaskKind::VariableOrConstantMask) const override

ARMTTIImpl(const ARMBaseTargetMachine *TM, const Function &F)

Definition ARMTargetTransformInfo.h:103

InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr) const override

InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override

InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override

std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override

void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override

InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override

TTI::AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const override

bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override

bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) const override

Definition ARMTargetTransformInfo.h:201

unsigned getNumberOfRegisters(unsigned ClassID) const override

Definition ARMTargetTransformInfo.h:151

InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override

bool areInlineCompatible(const Function *Caller, const Function *Callee) const override

InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const override

bool preferPredicatedReductionSelect() const override

bool isLegalMaskedGather(Type *Ty, Align Alignment) const override

unsigned getMaxInterleaveFactor(ElementCount VF) const override

Definition ARMTargetTransformInfo.h:183

unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const override

bool isProfitableLSRChainElement(Instruction *I) const override

bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const override

void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override

bool enableInterleavedAccessVectorization() const override

Definition ARMTargetTransformInfo.h:110

InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const override

getScalingFactorCost - Return the cost of the scaling used in addressing mode represented by AM.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.

Class to represent array types.

InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override

BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

This is an important base class in LLVM.

Container class for subtarget features.

The core instruction combiner logic.

A wrapper class for inspecting calls to intrinsic functions.

Represents a single loop in the control flow graph.

This class represents an analyzed expression in the program.

The main scalar evolution driver.

This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.

virtual const DataLayout & getDataLayout() const

virtual InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const

This pass provides access to the codegen interfaces that are needed for IR-level transformations.

MaskKind

Some targets only support masked load/store with a constant mask.

TargetCostKind

The kind of cost model.

AddressingModeKind

Which addressing mode Loop Strength Reduction will try to generate.

ShuffleKind

The various kinds of shuffle patterns for vector queries.

CastContextHint

Represents a hint about the context in which a cast is used.

static constexpr TypeSize getFixed(ScalarTy ExactSize)

static constexpr TypeSize getScalable(ScalarTy MinimumSize)

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM Value Representation.

Base class of all SIMD vector types.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Args[]

Key for Kernel::Metadata::mArgs.

@ C

The default llvm calling convention, compatible with C.

MemTransfer

Definition ARMTargetTransformInfo.h:54

@ Allow

Definition ARMTargetTransformInfo.h:54

@ ForceDisabled

Definition ARMTargetTransformInfo.h:54

@ ForceEnabled

Definition ARMTargetTransformInfo.h:54

Mode

Definition ARMTargetTransformInfo.h:43

@ ForceEnabled

Definition ARMTargetTransformInfo.h:48

@ Disabled

Definition ARMTargetTransformInfo.h:44

@ EnabledNoReductions

Definition ARMTargetTransformInfo.h:45

@ ForceEnabledNoReductions

Definition ARMTargetTransformInfo.h:47

@ Enabled

Definition ARMTargetTransformInfo.h:46

friend class Instruction

Iterator for Instructions in a BasicBlock.

This is an optimization pass for GlobalISel generic memory operations.

FunctionAddr VTableAddr Value

ArrayRef(const T &OneElt) -> ArrayRef< T >

bool isVREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)

isVREVMask - Check if a vector shuffle corresponds to a VREV instruction with the specified blocksize in bits.

Definition ARMTargetTransformInfo.h:365

This struct is a compact representation of a valid (non-zero power of two) alignment.

uint64_t getScalarSizeInBits() const

Parameters that control the generic loop unrolling transformation.