LLVM: llvm::BasicTTIImplBase< T > Class Template Reference

Base class which can be used to help build a TTI implementation. More...

#include "llvm/CodeGen/BasicTTIImpl.h"

Public Member Functions
Scalar TTI Implementations
bool allowsMisalignedMemoryAccesses (LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const override
bool areInlineCompatible (const Function *Caller, const Function *Callee) const override
bool hasBranchDivergence (const Function *F=nullptr) const override
bool isSourceOfDivergence (const Value *V) const override
bool isAlwaysUniform (const Value *V) const override
bool isValidAddrSpaceCast (unsigned FromAS, unsigned ToAS) const override
bool addrspacesMayAlias (unsigned AS0, unsigned AS1) const override
unsigned getFlatAddressSpace () const override
bool collectFlatAddressOperands (SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const override
bool isNoopAddrSpaceCast (unsigned FromAS, unsigned ToAS) const override
unsigned getAssumedAddrSpace (const Value *V) const override
bool isSingleThreaded () const override
std::pair< const Value *, unsigned > getPredicatedAddrSpace (const Value *V) const override
Value * rewriteIntrinsicWithAddressSpace (IntrinsicInst *II, Value *OldV, Value *NewV) const override
bool isLegalAddImmediate (int64_t imm) const override
bool isLegalAddScalableImmediate (int64_t Imm) const override
bool isLegalICmpImmediate (int64_t imm) const override
bool isLegalAddressingMode (Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const override
int64_t getPreferredLargeGEPBaseOffset (int64_t MinOffset, int64_t MaxOffset)
unsigned getStoreMinimumVF (unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const override
bool isIndexedLoadLegal (TTI::MemIndexedMode M, Type *Ty) const override
bool isIndexedStoreLegal (TTI::MemIndexedMode M, Type *Ty) const override
bool isLSRCostLess (const TTI::LSRCost &C1, const TTI::LSRCost &C2) const override
bool isNumRegsMajorCostOfLSR () const override
bool shouldDropLSRSolutionIfLessProfitable () const override
bool isProfitableLSRChainElement (Instruction *I) const override
InstructionCost getScalingFactorCost (Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const override
bool isTruncateFree (Type *Ty1, Type *Ty2) const override
bool isProfitableToHoist (Instruction *I) const override
bool useAA () const override
bool isTypeLegal (Type *Ty) const override
unsigned getRegUsageForType (Type *Ty) const override
InstructionCost getGEPCost (Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override
unsigned getEstimatedNumberOfCaseClusters (const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const override
bool shouldBuildLookupTables () const override
bool shouldBuildRelLookupTables () const override
bool haveFastSqrt (Type *Ty) const override
bool isFCmpOrdCheaperThanFCmpZero (Type *Ty) const override
InstructionCost getFPOpCost (Type *Ty) const override
bool preferToKeepConstantsAttached (const Instruction &Inst, const Function &Fn) const override
unsigned getInliningThresholdMultiplier () const override
unsigned adjustInliningThreshold (const CallBase *CB) const override
unsigned getCallerAllocaCost (const CallBase *CB, const AllocaInst *AI) const override
int getInlinerVectorBonusPercent () const override
void getUnrollingPreferences (Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
void getPeelingPreferences (Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
bool isHardwareLoopProfitable (Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const override
unsigned getEpilogueVectorizationMinVF () const override
bool preferPredicateOverEpilogue (TailFoldingInfo *TFI) const override
TailFoldingStyle getPreferredTailFoldingStyle (bool IVUpdateMayOverflow=true) const override
std::optional< Instruction * > instCombineIntrinsic (InstCombiner &IC, IntrinsicInst &II) const override
std::optional< Value * > simplifyDemandedUseBitsIntrinsic (InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const override
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic (InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const override
std::optional< unsigned > getCacheSize (TargetTransformInfo::CacheLevel Level) const override
std::optional< unsigned > getCacheAssociativity (TargetTransformInfo::CacheLevel Level) const override
unsigned getCacheLineSize () const override
unsigned getPrefetchDistance () const override
unsigned getMinPrefetchStride (unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const override
unsigned getMaxPrefetchIterationsAhead () const override
bool enableWritePrefetching () const override
bool shouldPrefetchAddressSpace (unsigned AS) const override
Vector TTI Implementations
TypeSize getRegisterBitWidth (TargetTransformInfo::RegisterKind K) const override
std::optional< unsigned > getMaxVScale () const override
std::optional< unsigned > getVScaleForTuning () const override
bool isVScaleKnownToBeAPowerOfTwo () const override
InstructionCost getScalarizationOverhead (VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const override
Estimate the overhead of scalarizing an instruction.
bool isTargetIntrinsicTriviallyScalarizable (Intrinsic::ID ID) const override
bool isTargetIntrinsicWithScalarOpAtArg (Intrinsic::ID ID, unsigned ScalarOpdIdx) const override
bool isTargetIntrinsicWithOverloadTypeAtArg (Intrinsic::ID ID, int OpdIdx) const override
bool isTargetIntrinsicWithStructReturnOverloadAtField (Intrinsic::ID ID, int RetIdx) const override
InstructionCost getScalarizationOverhead (VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind) const
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
InstructionCost getOperandsScalarizationOverhead (ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Estimate the overhead of scalarizing an instruction's operands.
InstructionCost getScalarizationOverhead (VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy and arguments Args of type Tys.
std::pair< InstructionCost, MVT > getTypeLegalizationCost (Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
unsigned getMaxInterleaveFactor (ElementCount VF) const override
InstructionCost getArithmeticInstrCost (unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
TTI::ShuffleKind improveShuffleKindFromMask (TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const
InstructionCost getShuffleCost (TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getCastInstrCost (unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
InstructionCost getExtractWithExtendCost (unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const override
InstructionCost getCFInstrCost (unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
InstructionCost getCmpSelInstrCost (unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getVectorInstrCost (unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override
InstructionCost getVectorInstrCost (unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx) const override
InstructionCost getVectorInstrCost (const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
InstructionCost getIndexedVectorInstrCostFromEnd (unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
InstructionCost getReplicationShuffleCost (Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const override
InstructionCost getMemoryOpCost (unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getInterleavedMemoryOpCost (unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
InstructionCost getIntrinsicInstrCost (const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
Get intrinsic cost based on arguments.
InstructionCost getTypeBasedIntrinsicInstrCost (const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
Get intrinsic cost based on argument types.
InstructionCost getMemIntrinsicInstrCost (const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const override
Get memory intrinsic cost based on arguments.
InstructionCost getCallInstrCost (Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Compute a cost of the given call instruction.
unsigned getNumberOfParts (Type *Tp) const override
InstructionCost getAddressComputationCost (Type *PtrTy, ScalarEvolution *, const SCEV *, TTI::TargetCostKind) const override
InstructionCost getTreeReductionCost (unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
InstructionCost getOrderedReductionCost (unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence of floating point additions in lane order, starting with an initial value.
InstructionCost getArithmeticReductionCost (unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getMinMaxReductionCost (Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
Try to calculate op costs for min/max reduction operations.
InstructionCost getExtendedReductionCost (unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getMulAccReductionCost (bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const override
InstructionCost getVectorSplitCost () const
Public Member Functions inherited from llvm::TargetTransformInfoImplCRTPBase< T >
InstructionCost getGEPCost (Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override
InstructionCost getPointersChainCost (ArrayRef< const Value * > Ptrs, const Value *Base, const TTI::PointersChainInfo &Info, Type *AccessTy, TTI::TargetCostKind CostKind) const override
InstructionCost getInstructionCost (const User *U, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind) const override
bool isExpensiveToSpeculativelyExecute (const Instruction *I) const override
bool supportsTailCallFor (const CallBase *CB) const override
Public Member Functions inherited from llvm::TargetTransformInfoImplBase
virtual ~TargetTransformInfoImplBase ()
TargetTransformInfoImplBase (const TargetTransformInfoImplBase &Arg)=default
TargetTransformInfoImplBase (TargetTransformInfoImplBase &&Arg)
virtual const DataLayout & getDataLayout () const
virtual unsigned getInliningCostBenefitAnalysisSavingsMultiplier () const
virtual unsigned getInliningCostBenefitAnalysisProfitableMultiplier () const
virtual int getInliningLastCallToStaticBonus () const
virtual InstructionCost getMemcpyCost (const Instruction *I) const
virtual uint64_t getMaxMemIntrinsicInlineSizeThreshold () const
virtual BranchProbability getPredictableBranchThreshold () const
virtual InstructionCost getBranchMispredictPenalty () const
virtual bool canHaveNonUndefGlobalInitializerInAddressSpace (unsigned AS) const
virtual bool isLoweredToCall (const Function *F) const
virtual bool canMacroFuseCmp () const
virtual bool canSaveCmp (Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const
virtual TTI::AddressingModeKind getPreferredAddressingMode (const Loop *L, ScalarEvolution *SE) const
virtual bool isLegalMaskedStore (Type *DataType, Align Alignment, unsigned AddressSpace, TTI::MaskKind MaskKind) const
virtual bool isLegalMaskedLoad (Type *DataType, Align Alignment, unsigned AddressSpace, TTI::MaskKind MaskKind) const
virtual bool isLegalNTStore (Type *DataType, Align Alignment) const
virtual bool isLegalNTLoad (Type *DataType, Align Alignment) const
virtual bool isLegalBroadcastLoad (Type *ElementTy, ElementCount NumElements) const
virtual bool isLegalMaskedScatter (Type *DataType, Align Alignment) const
virtual bool isLegalMaskedGather (Type *DataType, Align Alignment) const
virtual bool forceScalarizeMaskedGather (VectorType *DataType, Align Alignment) const
virtual bool forceScalarizeMaskedScatter (VectorType *DataType, Align Alignment) const
virtual bool isLegalMaskedCompressStore (Type *DataType, Align Alignment) const
virtual bool isLegalAltInstr (VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const
virtual bool isLegalMaskedExpandLoad (Type *DataType, Align Alignment) const
virtual bool isLegalStridedLoadStore (Type *DataType, Align Alignment) const
virtual bool isLegalInterleavedAccessType (VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace) const
virtual bool isLegalMaskedVectorHistogram (Type *AddrType, Type *DataType) const
virtual bool enableOrderedReductions () const
virtual bool hasDivRemOp (Type *DataType, bool IsSigned) const
virtual bool hasVolatileVariant (Instruction *I, unsigned AddrSpace) const
virtual bool prefersVectorizedAddressing () const
virtual bool LSRWithInstrQueries () const
virtual bool shouldBuildLookupTablesForConstant (Constant *C) const
virtual bool useColdCCForColdCall (Function &F) const
virtual bool useFastCCForInternalCall (Function &F) const
virtual bool supportsEfficientVectorElementLoadStore () const
virtual bool supportsTailCalls () const
virtual bool enableAggressiveInterleaving (bool LoopHasReductions) const
virtual TTI::MemCmpExpansionOptions enableMemCmpExpansion (bool OptSize, bool IsZeroCmp) const
virtual bool enableSelectOptimize () const
virtual bool shouldTreatInstructionLikeSelect (const Instruction *I) const
virtual bool enableInterleavedAccessVectorization () const
virtual bool enableMaskedInterleavedAccessVectorization () const
virtual bool isFPVectorizationPotentiallyUnsafe () const
virtual TTI::PopcntSupportKind getPopcntSupport (unsigned IntTyWidthInBit) const
virtual InstructionCost getIntImmCodeSizeCost (unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty) const
virtual InstructionCost getIntImmCost (const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const
virtual InstructionCost getIntImmCostInst (unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr) const
virtual InstructionCost getIntImmCostIntrin (Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const
virtual unsigned getNumberOfRegisters (unsigned ClassID) const
virtual bool hasConditionalLoadStoreForType (Type *Ty, bool IsStore) const
virtual unsigned getRegisterClassForType (bool Vector, Type *Ty=nullptr) const
virtual const char * getRegisterClassName (unsigned ClassID) const
virtual unsigned getMinVectorRegisterBitWidth () const
virtual bool shouldMaximizeVectorBandwidth (TargetTransformInfo::RegisterKind K) const
virtual ElementCount getMinimumVF (unsigned ElemWidth, bool IsScalable) const
virtual unsigned getMaximumVF (unsigned ElemWidth, unsigned Opcode) const
virtual bool shouldConsiderAddressTypePromotion (const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const
virtual std::optional< unsigned > getMinPageSize () const
virtual InstructionCost getPartialReductionCost (unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, TTI::PartialReductionExtendKind OpAExtend, TTI::PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind) const
virtual InstructionCost getAltInstrCost (VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const
virtual InstructionCost getInsertExtractValueCost (unsigned Opcode, TTI::TargetCostKind CostKind) const
virtual InstructionCost getCostOfKeepingLiveOverCall (ArrayRef< Type * > Tys) const
virtual bool getTgtMemIntrinsic (IntrinsicInst *Inst, MemIntrinsicInfo &Info) const
virtual unsigned getAtomicMemIntrinsicMaxElementSize () const
virtual Value * getOrCreateResultFromMemIntrinsic (IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate=true) const
virtual Type * getMemcpyLoopLoweringType (LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize) const
virtual void getMemcpyLoopResidualLoweringType (SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize) const
virtual unsigned getInlineCallPenalty (const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const
virtual bool areTypesABICompatible (const Function *Caller, const Function *Callee, ArrayRef< Type * > Types) const
virtual unsigned getLoadStoreVecRegBitWidth (unsigned AddrSpace) const
virtual bool isLegalToVectorizeLoad (LoadInst *LI) const
virtual bool isLegalToVectorizeStore (StoreInst *SI) const
virtual bool isLegalToVectorizeLoadChain (unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
virtual bool isLegalToVectorizeStoreChain (unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
virtual bool isLegalToVectorizeReduction (const RecurrenceDescriptor &RdxDesc, ElementCount VF) const
virtual bool isElementTypeLegalForScalableVector (Type *Ty) const
virtual unsigned getLoadVectorFactor (unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
virtual unsigned getStoreVectorFactor (unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
virtual bool preferFixedOverScalableIfEqualCost (bool IsEpilogue) const
virtual bool preferInLoopReduction (RecurKind Kind, Type *Ty) const
virtual bool preferAlternateOpcodeVectorization () const
virtual bool preferPredicatedReductionSelect () const
virtual bool preferEpilogueVectorization () const
virtual bool shouldConsiderVectorizationRegPressure () const
virtual bool shouldExpandReduction (const IntrinsicInst *II) const
virtual TTI::ReductionShuffle getPreferredExpandedReductionShuffle (const IntrinsicInst *II) const
virtual unsigned getGISelRematGlobalCost () const
virtual unsigned getMinTripCountTailFoldingThreshold () const
virtual bool supportsScalableVectors () const
virtual bool enableScalableVectorization () const
virtual bool hasActiveVectorLength () const
virtual bool isProfitableToSinkOperands (Instruction *I, SmallVectorImpl< Use * > &Ops) const
virtual bool isVectorShiftByScalarCheap (Type *Ty) const
virtual TargetTransformInfo::VPLegalization getVPLegalizationStrategy (const VPIntrinsic &PI) const
virtual bool hasArmWideBranch (bool) const
virtual APInt getFeatureMask (const Function &F) const
virtual bool isMultiversionedFunction (const Function &F) const
virtual unsigned getMaxNumArgs () const
virtual unsigned getNumBytesToPadGlobalArray (unsigned Size, Type *ArrayType) const
virtual void collectKernelLaunchBounds (const Function &F, SmallVectorImpl< std::pair< StringRef, int64_t > > &LB) const
virtual bool allowVectorElementIndexingUsingGEP () const
Protected Member Functions
BasicTTIImplBase (const TargetMachine *TM, const DataLayout &DL)
~BasicTTIImplBase () override=default
Protected Member Functions inherited from llvm::TargetTransformInfoImplCRTPBase< T >
TargetTransformInfoImplCRTPBase (const DataLayout &DL)
Protected Member Functions inherited from llvm::TargetTransformInfoImplBase
TargetTransformInfoImplBase (const DataLayout &DL)
unsigned minRequiredElementSize (const Value *Val, bool &isSigned) const
bool isStridedAccess (const SCEV *Ptr) const
const SCEVConstant * getConstantStrideStep (ScalarEvolution *SE, const SCEV *Ptr) const
bool isConstantStridedAccessLessThan (ScalarEvolution *SE, const SCEV *Ptr, int64_t MergeDistance) const

template<typename T>
class llvm::BasicTTIImplBase< T >

Base class which can be used to help build a TTI implementation.

This class provides as much implementation of the TTI interface as is possible using the target independent parts of the code generator.

In order to subclass it, your class must implement a getST() method to return the subtarget, and a getTLI() method to return the target lowering. We need these methods implemented in the derived class so that this class doesn't have to duplicate storage for them.

Definition at line 82 of file BasicTTIImpl.h.
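
As a concrete illustration of the subclassing requirement described above, here is a minimal sketch of a target TTI implementation built on this base. The target name Foo and the types FooSubtarget, FooTargetLowering and FooTargetMachine are hypothetical placeholders (not part of LLVM), and the exact constructor and accessor details vary between in-tree targets and LLVM versions; treat this as a pattern sketch, not the definitive implementation.

```cpp
#include "llvm/CodeGen/BasicTTIImpl.h"

// Hypothetical target "Foo": FooSubtarget, FooTargetLowering and
// FooTargetMachine stand in for a real target's classes.
class FooTTIImpl final : public llvm::BasicTTIImplBase<FooTTIImpl> {
  using BaseT = llvm::BasicTTIImplBase<FooTTIImpl>;
  friend BaseT;

  const FooSubtarget *ST;
  const FooTargetLowering *TLI;

  // BasicTTIImplBase calls these accessors instead of storing the subtarget
  // and target lowering itself, so the derived class owns that state.
  const FooSubtarget *getST() const { return ST; }
  const FooTargetLowering *getTLI() const { return TLI; }

public:
  FooTTIImpl(const FooTargetMachine *TM, const llvm::Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()),
        ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {}

  // Override only the hooks where the target's answer differs from the
  // target-independent default provided by the base class, for example:
  unsigned getCacheLineSize() const override { return 64; }
};
```

In-tree targets (for example llvm::X86TTIImpl or llvm::BasicTTIImpl itself) follow this same shape: the subtarget and target-lowering pointers live in the derived class and are exposed through getST()/getTLI().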

~BasicTTIImplBase()

addrspacesMayAlias()

adjustInliningThreshold()

allowsMisalignedMemoryAccesses()

areInlineCompatible()

collectFlatAddressOperands()

enableWritePrefetching()

getAddressComputationCost()

getArithmeticInstrCost()

getArithmeticReductionCost()

getAssumedAddrSpace()

getCacheAssociativity()

getCacheLineSize()

getCacheSize()

getCallerAllocaCost()

getCallInstrCost()

Compute a cost of the given call instruction.

Compute the cost of calling function F with return type RetTy and argument types Tys. F might be nullptr; in that case the cost of an arbitrary call with the specified signature is returned. This is used, for instance, when estimating the cost of calling a vector counterpart of the given function.

Parameters

F Called function, might be nullptr.
RetTy Return value type.
Tys Argument types.

Returns

The cost of the call instruction.

Reimplemented from llvm::TargetTransformInfoImplBase.

Reimplemented in llvm::HexagonTTIImpl.

Definition at line 3076 of file BasicTTIImpl.h.
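
A hedged usage sketch, going through the public TargetTransformInfo wrapper rather than this class directly: it prices a call to a 4-lane vector counterpart of sinf whose declaration may or may not exist in the module. The mangled name "_ZGVbN4v_sinf" and the helper name are purely illustrative.

```cpp
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Illustrative: estimate the cost of calling a <4 x float> counterpart of
// sinf. The function may be absent (nullptr), in which case the cost of an
// arbitrary call with this signature is returned.
static InstructionCost costOfVectorSinCall(const TargetTransformInfo &TTI,
                                           Module &M) {
  auto *VecTy = FixedVectorType::get(Type::getFloatTy(M.getContext()), 4);
  Function *VecSin = M.getFunction("_ZGVbN4v_sinf"); // may be nullptr
  return TTI.getCallInstrCost(VecSin, /*RetTy=*/VecTy, /*Tys=*/{VecTy},
                              TargetTransformInfo::TCK_RecipThroughput);
}
```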

getCastInstrCost()

getCFInstrCost()

getCmpSelInstrCost()

getEpilogueVectorizationMinVF()

getEstimatedNumberOfCaseClusters()

Try to find the estimated number of clusters. Note that the number of clusters identified by this function can differ from the actual number found during lowering. This function ignores switches that are lowered with a mix of jump table / bit test / BTree. It was initially intended for estimating the cost of a switch in the inline cost heuristic, but it is a generic cost model that can be used elsewhere as well (e.g., in loop unrolling).

Reimplemented from llvm::TargetTransformInfoImplBase.

Definition at line 575 of file BasicTTIImpl.h.
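
A minimal sketch of how a client might consult this hook via the public TargetTransformInfo interface; the helper name is illustrative, and the interpretation of JumpTableSize in the comment is an assumption based on the description above.

```cpp
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Illustrative: ask the cost model how many case clusters a switch is
// expected to lower to. JumpTableSize is set when a jump table is expected.
static unsigned estimateSwitchClusters(const TargetTransformInfo &TTI,
                                       const SwitchInst &SI,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  unsigned JumpTableSize = 0;
  unsigned NumClusters =
      TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);
  // Many clusters (or a huge jump table) hint that lowering the switch will
  // be expensive, e.g. when estimating inlining cost.
  return NumClusters;
}
```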

getExtendedReductionCost()

getExtractWithExtendCost()

getFlatAddressSpace()

getFPOpCost()

getGEPCost()

getIndexedVectorInstrCostFromEnd()

getInlinerVectorBonusPercent()

getInliningThresholdMultiplier()

getInterleavedMemoryOpCost()

getIntrinsicInstrCost()

getMaxInterleaveFactor()

getMaxPrefetchIterationsAhead()

getMaxVScale()

getMemIntrinsicInstrCost()

getMemoryOpCost()

getMinMaxReductionCost()

getMinPrefetchStride()

getMulAccReductionCost()

getNumberOfParts()

getOperandsScalarizationOverhead()

getOrderedReductionCost()

Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence of floating point additions in lane order, starting with an initial value.

For example, consider a scalar initial value 'InitVal' of type float and a vector of type <4 x float>:

Vector = <float v0, float v1, float v2, float v3>

add1 = InitVal + v0
add2 = add1 + v1
add3 = add2 + v2
add4 = add3 + v3

As a simple estimate we can say the cost of such a reduction is 4 times the cost of a scalar FP addition. We can only estimate the costs for fixed-width vectors here because for scalable vectors we do not know the runtime number of operations.

Definition at line 3201 of file BasicTTIImpl.h.

Referenced by llvm::BasicTTIImplBase< BasicTTIImpl >::getArithmeticReductionCost().
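
As the "Referenced by" note above hints, getArithmeticReductionCost forwards to this helper when the fast-math flags demand in-order evaluation (roughly, when reassociation is not allowed). A hedged sketch of querying that path through the public TargetTransformInfo wrapper, matching the <4 x float> example; the helper name is illustrative.

```cpp
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Illustrative: price a strict (in-order) fadd reduction of <4 x float>.
// With no 'reassoc' flag, the base implementation should fall back to the
// lane-by-lane estimate, i.e. roughly 4 scalar FP additions.
static InstructionCost orderedFAddReductionCost(const TargetTransformInfo &TTI,
                                                LLVMContext &Ctx) {
  auto *VecTy = FixedVectorType::get(Type::getFloatTy(Ctx), 4);
  FastMathFlags StrictFMF; // default: reassociation not allowed
  return TTI.getArithmeticReductionCost(
      Instruction::FAdd, VecTy, StrictFMF,
      TargetTransformInfo::TCK_RecipThroughput);
}
```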

getPeelingPreferences()

getPredicatedAddrSpace()

getPreferredLargeGEPBaseOffset()

getPreferredTailFoldingStyle()

getPrefetchDistance()

getRegisterBitWidth()

Reimplemented from llvm::TargetTransformInfoImplBase.

Reimplemented in llvm::GCNTTIImpl, llvm::HexagonTTIImpl, llvm::LoongArchTTIImpl, llvm::NVPTXTTIImpl, llvm::PPCTTIImpl, llvm::R600TTIImpl, llvm::RISCVTTIImpl, llvm::SystemZTTIImpl, llvm::VETTIImpl, llvm::WebAssemblyTTIImpl, and llvm::X86TTIImpl.

Definition at line 887 of file BasicTTIImpl.h.

getRegUsageForType()

getReplicationShuffleCost()

getScalarizationOverhead() [1/3]

Helper wrapper for the DemandedElts variant of getScalarizationOverhead.

Definition at line 954 of file BasicTTIImpl.h.

getScalarizationOverhead() [2/3]

getScalarizationOverhead() [3/3]

Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy and arguments Args of type Tys.

If Args are unknown (empty), then the cost associated with one argument is added as a heuristic.

Definition at line 990 of file BasicTTIImpl.h.

getScalingFactorCost()

getShuffleCost()

getStoreMinimumVF()

getTreeReductionCost()

Try to calculate arithmetic and shuffle op costs for reduction intrinsics.

We assume that the reduction is performed in the following way:

val1 = shufflevector val, undef,
       <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
red1 = op val, val1

The shuffle moves the upper n/2 lanes of val into the lower n/2 lanes of val1. After this operation we have a vector red1 in which only the first n/2 elements are meaningful; the second n/2 elements are undefined and can be dropped. All subsequent operations still work on vectors of real length n, but only n/2 lanes carry data.

val2 = shufflevector red1, undef,
       <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
red2 = op red1, val2

red2 works with the n/2 meaningful lanes and leaves only n/4 meaningful lanes, and so on, halving the effective length at each step.

The cost model should take into account that the actual length of the vector is reduced on each iteration.

Definition at line 3125 of file BasicTTIImpl.h.

Referenced by llvm::BasicTTIImplBase< BasicTTIImpl >::getArithmeticReductionCost().
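
Conversely, when reassociation is permitted, the base implementation can use this log2-depth tree estimate instead of the in-order one. A hedged companion to the sketch above, again via the public wrapper; the helper name is illustrative.

```cpp
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Illustrative: with 'reassoc' set, an 8-lane fadd reduction can be priced as
// a log2(8) = 3-level shuffle+add tree rather than 8 in-order additions.
static InstructionCost treeFAddReductionCost(const TargetTransformInfo &TTI,
                                             LLVMContext &Ctx) {
  auto *VecTy = FixedVectorType::get(Type::getFloatTy(Ctx), 8);
  FastMathFlags FMF;
  FMF.setAllowReassoc(); // reassociation permitted => tree estimate applies
  return TTI.getArithmeticReductionCost(
      Instruction::FAdd, VecTy, FMF,
      TargetTransformInfo::TCK_RecipThroughput);
}
```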

getTypeBasedIntrinsicInstrCost()

getTypeLegalizationCost()

getUnrollingPreferences()

getVectorInstrCost() [1/3]

getVectorInstrCost() [2/3]

getVectorInstrCost() [3/3]

getVectorSplitCost()

getVScaleForTuning()

hasBranchDivergence()

haveFastSqrt()

improveShuffleKindFromMask()

instCombineIntrinsic()

isAlwaysUniform()

isFCmpOrdCheaperThanFCmpZero()

isHardwareLoopProfitable()

isIndexedLoadLegal()

isIndexedStoreLegal()

isLegalAddImmediate()

isLegalAddressingMode()

isLegalAddScalableImmediate()

isLegalICmpImmediate()

isLSRCostLess()

isNoopAddrSpaceCast()

isNumRegsMajorCostOfLSR()

isProfitableLSRChainElement()

isProfitableToHoist()

isSingleThreaded()

isSourceOfDivergence()

isTargetIntrinsicTriviallyScalarizable()

isTargetIntrinsicWithOverloadTypeAtArg()

isTargetIntrinsicWithScalarOpAtArg()

isTargetIntrinsicWithStructReturnOverloadAtField()

isTruncateFree()

isTypeLegal()

isValidAddrSpaceCast()

isVScaleKnownToBeAPowerOfTwo()

preferPredicateOverEpilogue()

preferToKeepConstantsAttached()

rewriteIntrinsicWithAddressSpace()

shouldBuildLookupTables()

shouldBuildRelLookupTables()

shouldDropLSRSolutionIfLessProfitable()

shouldPrefetchAddressSpace()

simplifyDemandedUseBitsIntrinsic()

simplifyDemandedVectorEltsIntrinsic()

useAA()

DL


The documentation for this class was generated from the following file:

llvm/CodeGen/BasicTTIImpl.h