LLVM: llvm::BPFTargetLowering Class Reference
Public Member Functions
BPFTargetLowering (const TargetMachine &TM, const BPFSubtarget &STI)
LowerOperation (SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
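A custom lowering hook typically dispatches on the opcode of the node it was handed. The following is a minimal sketch of that dispatcher shape; the helper functions named here (LowerGlobalAddress, LowerBR_CC) are assumptions for illustration, not a claim about which operations BPF actually customizes.

```cpp
// Hedged sketch of a LowerOperation dispatcher (helper names are assumed).
SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG); // assumed private helper
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);         // assumed private helper
  default:
    report_fatal_error("unimplemented operation lowering");
  }
}
```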
isOffsetFoldingLegal (const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
allowsMisalignedMemoryAccesses (EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *) const override
Determine if the target supports unaligned memory accesses.
BPFTargetLowering::ConstraintType
getConstraintType (StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
std::pair< unsigned, const TargetRegisterClass * >
getRegForInlineAsmConstraint (const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
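For a single-letter register constraint such as "r", an override usually maps the constraint to a register class and defers everything else to the base implementation. In this sketch the class names (MyTargetLowering, MyTarget::GPRRegClass) are placeholders, not BPF's actual names.

```cpp
// Hedged sketch of a getRegForInlineAsmConstraint override.
std::pair<unsigned, const TargetRegisterClass *>
MyTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  if (Constraint.size() == 1 && Constraint[0] == 'r')
    return std::make_pair(0U, &MyTarget::GPRRegClass); // placeholder class
  // Let the generic handling deal with everything else.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
```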
EmitInstrWithCustomInserter (MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' flag.
getHasAlu32 () const
getHasJmp32 () const
getSetCCResultType (const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
getScalarShiftAmountTy (const DataLayout &, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
getJumpTableEncoding () const override
Return the entry encoding for a jump table in the current function.
Public Member Functions inherited from llvm::TargetLowering
TargetLowering (const TargetLowering &)=delete
operator= (const TargetLowering &)=delete
TargetLowering (const TargetMachine &TM, const TargetSubtargetInfo &STI)
NOTE: The TargetMachine owns TLOF.
~TargetLowering () override
isPositionIndependent () const
virtual bool
isSDNodeSourceOfDivergence (const SDNode *N, FunctionLoweringInfo *FLI, UniformityInfo *UA) const
virtual bool
isReassocProfitable (SelectionDAG &DAG, SDValue N0, SDValue N1) const
virtual bool
isReassocProfitable (MachineRegisterInfo &MRI, Register N0, Register N1) const
virtual bool
isSDNodeAlwaysUniform (const SDNode *N) const
virtual bool
getPreIndexedAddressParts (SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's address can be legally represented as pre-indexed load / store address.
virtual bool
getPostIndexedAddressParts (SDNode *, SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node can be combined with a load / store to form a post-indexed load / store.
virtual bool
isIndexingLegal (MachineInstr &MI, Register Base, Register Offset, bool IsPre, MachineRegisterInfo &MRI) const
Returns true if the specified base+offset is a legal indexed addressing mode for this target.
virtual MVT
getJumpTableRegTy (const DataLayout &DL) const
LowerCustomJumpTableEntry (const MachineJumpTableInfo *, const MachineBasicBlock *, unsigned, MCContext &) const
virtual SDValue
getPICJumpTableRelocBase (SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
getPICJumpTableRelocBaseExpr (const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr.
virtual bool
isInlineAsmTargetBranch (const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use either a memory constraint or an address constraint.
isInTailCallPosition (SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
void
softenSetCCOperands (SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
void
softenSetCCOperands (SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS, SDValue &Chain, bool IsSignaling=false) const
virtual SDValue
visitMaskedLoad (SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue
visitMaskedStore (SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
makeLibCall (SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
makeLibCall (SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
It is an error to pass RTLIB::UNKNOWN_LIBCALL as LC.
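A typical use of makeLibCall is to lower an operation through a runtime library routine when no native instruction exists. The helper below is a hedged sketch that assumes it is called from lowering code with a TargetLowering reference in scope; the choice of RTLIB::SDIV_I64 is only an example.

```cpp
// Sketch: lower a 64-bit signed division through a libcall (illustrative).
static SDValue lowerSDiv64ViaLibcall(SDValue Op, SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  SDLoc DL(Op);
  TargetLowering::MakeLibCallOptions CallOptions;
  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
  // makeLibCall returns a pair of (return value, chain).
  std::pair<SDValue, SDValue> Call =
      TLI.makeLibCall(DAG, RTLIB::SDIV_I64, MVT::i64, Ops, CallOptions, DL);
  return Call.first;
}
```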
parametersInCSRMatch (const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the calling function.
virtual bool
findOptimalMemOpLowering (LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
ShrinkDemandedConstant (SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
ShrinkDemandedConstant (SDValue Op, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Helper wrapper around ShrinkDemandedConstant, demanding all elements.
virtual bool
targetShrinkDemandedConstant (SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
ShrinkDemandedOp (SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
SimplifyDemandedBits (SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op. At this point, we know that only the DemandedBits bits of the result of Op are ever used downstream.
SimplifyDemandedBits (SDValue Op, const APInt &DemandedBits, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Helper wrapper around SimplifyDemandedBits, demanding all elements.
SimplifyDemandedBits (SDValue Op, const APInt &DemandedBits, DAGCombinerInfo &DCI) const
Helper wrapper around SimplifyDemandedBits.
SimplifyDemandedBits (SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, DAGCombinerInfo &DCI) const
Helper wrapper around SimplifyDemandedBits.
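Target combines commonly call the DAGCombinerInfo wrapper to strip bits that a node never reads. The sketch below assumes a hypothetical node whose 64-bit operand is only ever consumed as a 32-bit value; the function name and the width assumption are illustrative.

```cpp
// Hedged sketch: simplify an operand given that only its low 32 bits matter.
static SDValue simplifyLow32(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                             const TargetLowering &TLI) {
  SDValue Src = N->getOperand(0);
  APInt Demanded = APInt::getLowBitsSet(64, 32); // assume a 64-bit operand
  if (TLI.SimplifyDemandedBits(Src, Demanded, DCI))
    return SDValue(N, 0); // the operand was rewritten in place
  return SDValue();
}
```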
SimplifyMultipleUseDemandedBits (SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contribute to the DemandedBits/DemandedElts - bitwise ops etc.
SimplifyMultipleUseDemandedBits (SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all elements.
SimplifyMultipleUseDemandedVectorElts (SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector elements.
SimplifyDemandedVectorElts (SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op. At this point, we know that only the DemandedElts elements of the result of Op are ever used downstream.
SimplifyDemandedVectorElts (SDValue Op, const APInt &DemandedElts, DAGCombinerInfo &DCI) const
Helper wrapper around SimplifyDemandedVectorElts.
virtual bool
shouldSimplifyDemandedVectorElts (SDValue Op, const TargetLoweringOpt &TLO) const
Return true if the target supports simplifying demanded vector elements by converting them to undefs.
virtual void
computeKnownBitsForTargetNode (const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in the KnownZero/KnownOne bitsets.
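An override of this hook reports known-bit facts about target-specific nodes. The sketch below assumes a placeholder node MyISD::LOAD_U8 that always zero-extends an 8-bit value; it is not a description of any BPF node.

```cpp
// Minimal sketch of a computeKnownBitsForTargetNode override.
void MyTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {
  Known.resetAll();
  if (Op.getOpcode() == MyISD::LOAD_U8) // placeholder target node
    Known.Zero.setBitsFrom(8);          // bits 8 and above are known zero
}
```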
virtual void
computeKnownBitsForTargetInstr (GISelValueTracking &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in the KnownZero/KnownOne bitsets.
virtual void
computeKnownFPClassForTargetInstr (GISelValueTracking &Analysis, Register R, KnownFPClass &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
virtual Align
computeKnownAlignForTargetInstr (GISelValueTracking &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
virtual void
computeKnownBitsForFrameIndex (int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual unsigned
ComputeNumSignBitsForTargetNode (SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits to the DAG Combiner.
virtual unsigned
computeNumSignBitsForTargetInstr (GISelValueTracking &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits to GlobalISel combiners.
virtual bool
SimplifyDemandedVectorEltsForTargetNode (SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success.
virtual bool
SimplifyDemandedBitsForTargetNode (SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
virtual SDValue
SimplifyMultipleUseDemandedBitsForTargetNode (SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contribute to the DemandedBits/DemandedElts - bitwise ops etc.
virtual bool
isGuaranteedNotToBeUndefOrPoisonForTargetNode (SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false, does not have undef bits.
virtual bool
canCreateUndefOrPoisonForTargetNode (SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
buildLegalVectorShuffle (EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
getTargetConstantFromLoad (LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
virtual bool
isKnownNeverNaNForTargetNode (SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, return true if Op is known to never be any NaN; if SNaN is true, return true if Op is known to never be a signaling NaN.
virtual bool
isSplatValueForTargetNode (SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which may be undef in the output UndefElts.
virtual bool
isTargetCanonicalConstantNode (SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be transformed back into a BUILD_VECTOR.
virtual bool
isTargetCanonicalSelect (SDNode *N) const
Return true if the given select/vselect should be considered canonical and not be transformed.
isConstTrueVal (SDValue N) const
Return true if N is a constant or constant vector equal to the true value from getBooleanContents().
isConstFalseVal (SDValue N) const
Return true if N is a constant or constant vector equal to the false value from getBooleanContents().
isExtendedTrueVal (const ConstantSDNode *N, EVT VT, bool SExt) const
Return if N is a True value when extended to VT.
SimplifySetCC (EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
virtual SDValue
unwrapAddress (SDValue N) const
virtual bool
isGAPlusOffset (SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual SDValue
PerformDAGCombine (SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target has registered to invoke it for.
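The override usually switches on the node opcode and returns either a replacement value or an empty SDValue to signal "no change". The (and x, 0) -> 0 fold below is only a demonstration, not a combine BPF registers; the class name is a placeholder.

```cpp
// Illustrative PerformDAGCombine skeleton.
SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  case ISD::AND:
    // Trivial demonstration fold: (and x, 0) -> 0.
    if (isNullConstant(N->getOperand(1)))
      return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
    break;
  default:
    break;
  }
  return SDValue(); // no change
}
```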
virtual bool
isDesirableToCommuteWithShift (const SDNode *N, CombineLevel Level) const
Return true if it is profitable to move this shift by a constant amount through its operand, adjusting any immediate operands as necessary to preserve semantics.
virtual bool
isDesirableToCommuteWithShift (const MachineInstr &MI, bool IsAfterLegal) const
GlobalISel - return true if it is profitable to move this shift by a constant amount through its operand, adjusting any immediate operands as necessary to preserve semantics.
virtual bool
isDesirableToPullExtFromShl (const MachineInstr &MI) const
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x), y => zext (shl x, y)
virtual AndOrSETCCFoldKind
isDesirableToCombineLogicOpOfSETCC (const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const
virtual bool
isDesirableToCommuteXorWithShift (const SDNode *N) const
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT.
virtual bool
isTypeDesirableForOp (unsigned, EVT VT) const
Return true if the target has native support for the specified value type and it is 'desirable' to use the type for the given node type.
virtual bool
isDesirableToTransformToIntegerOp (unsigned, EVT) const
Return true if it is profitable for the dag combiner to transform a floating point op of the specified opcode to an equivalent op of an integer type.
virtual bool
IsDesirableToPromoteOp (SDValue, EVT &) const
This method queries the target whether it is beneficial for the dag combiner to promote the specified node.
virtual bool
supportSwiftError () const
Return true if the target supports the swifterror attribute.
virtual bool
supportSplitCSR (MachineFunction *MF) const
Return true if the target supports that a subset of CSRs for the given machine function is handled explicitly via copies.
virtual bool
supportKCFIBundles () const
Return true if the target supports kcfi operand bundles.
virtual bool
supportPtrAuthBundles () const
Return true if the target supports ptrauth operand bundles.
virtual void
initializeSplitCSR (MachineBasicBlock *Entry) const
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
virtual void
insertCopiesSplitCSR (MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const
Insert explicit copies in entry and exit blocks.
virtual SDValue
getNegatedExpression (SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive, and set the cost in Cost to indicate whether it is cheaper or neutral to do the negation.
getCheaperOrNeutralNegatedExpression (SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, const NegatibleCost CostThreshold=NegatibleCost::Neutral, unsigned Depth=0) const
getCheaperNegatedExpression (SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
getNegatedExpression (SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression if the cost is not expensive.
virtual bool
splitValueIntoRegisterParts (SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue
joinRegisterPartsIntoValue (SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue
lowerEHPadEntry (SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
Optional target hook to add target-specific actions when entering EH pad blocks.
virtual void
markLibCallAttributes (MachineFunction *MF, unsigned CC, ArgListTy &Args) const
LowerCallTo (CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual void
HandleByVal (CCState *, unsigned &, Align) const
Target-specific cleanup for formal ByVal parameters.
virtual bool
CanLowerReturn (CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fit into the return registers.
virtual bool
isUsedByReturnOnly (SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual bool
mayBeEmittedAsTailCall (const CallInst *) const
Return true if the target may be able to emit the call instruction as a tail call.
virtual Register
getRegisterByName (const char *RegName, LLT Ty, const MachineFunction &MF) const
Return the register ID of the name passed in.
virtual EVT
getTypeForExtReturn (LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
virtual bool
functionArgumentNeedsConsecutiveRegisters (Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
virtual bool
shouldSplitFunctionArgumentsAsLittleEndian (const DataLayout &DL) const
For most targets, an LLVM type must be broken down into multiple smaller types.
getScratchRegisters (CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
getRoundingControlRegisters () const
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call.
virtual SDValue
prepareVolatileOrAtomicLoad (SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual void
LowerOperationWrapper (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.
getTargetNodeName (unsigned Opcode) const
This method returns the name of a target specific DAG node.
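An override of getTargetNodeName simply maps target opcodes to printable names for DAG dumps. The opcode MyISD::RET_GLUE below is a placeholder for whatever target-specific nodes exist.

```cpp
// Minimal sketch of a getTargetNodeName override.
const char *MyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case MyISD::RET_GLUE: // placeholder target node
    return "MyISD::RET_GLUE";
  }
  return nullptr; // unknown opcodes fall back to the default printing
}
```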
virtual FastISel *
createFastISel (FunctionLoweringInfo &, const TargetLibraryInfo *) const
This method returns a target specific FastISel object, or null if the target does not support "fast" ISel.
virtual AsmOperandInfoVector
ParseConstraints (const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their prefixes, and also tie in the associated operand values.
virtual ConstraintWeight
getMultipleConstraintMatchWeight (AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
virtual ConstraintWeight
getSingleConstraintMatchWeight (AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual void
ComputeConstraintToUse (AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
getConstraintPreferences (AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constraint codes and their types in priority of what we'd prefer to lower them as.
virtual InlineAsm::ConstraintCode
getInlineAsmMemConstraint (StringRef ConstraintCode) const
LowerXConstraint (EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requirements based on the type of the corresponding operand.
virtual void
LowerAsmOperandForConstraint (SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual SDValue
LowerAsmOutputForConstraint (SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual void
CollectTargetIntrinsicOperands (const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
BuildSDIV (SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will generate the same value by multiplying by a magic number.
BuildUDIV (SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will generate the same value by multiplying by a magic number.
buildSDIVPow2WithCMov (SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions.
Ref: "Hacker's Delight" by Henry Warren, 10-1.
If conditional move/branch is preferred, we lower sdiv x, +/-2**k into:
  bgez x, label
  add x, x, 2**k-1
label:
  sra res, x, k
  neg res, res (when the divisor is negative)
virtual SDValue
BuildSDIVPow2 (SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
virtual SDValue
BuildSREMPow2 (SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
virtual unsigned
combineRepeatedFPDivisors () const
Indicate whether this target prefers to combine FDIVs with the same divisor.
virtual SDValue
getSqrtEstimate (SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const
Hooks for building estimates in place of slower divisions and square roots.
createSelectForFMINNUM_FMAXNUM (SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
virtual SDValue
getRecipEstimate (SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const
Return a reciprocal estimate value for the input operand.
virtual SDValue
getSqrtInputTest (SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square root estimate calculation.
virtual SDValue
getSqrtResultForDenormInput (SDValue Operand, SelectionDAG &DAG) const
Return a target-dependent result if the input operand is not suitable for use with a square root estimate calculation.
expandMUL_LOHI (unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively, each computing an n/2-bit part of the result.
expandMUL (SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
expandDIVREMByConstant (SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arithmetic ops.
expandFunnelShift (SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
expandROT (SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
void
expandShiftParts (SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
expandFP_TO_SINT (SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
expandFP_TO_UINT (SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
expandUINT_TO_FP (SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
expandFMINNUM_FMAXNUM (SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
expandFMINIMUM_FMAXIMUM (SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparison with selects.
expandFMINIMUMNUM_FMAXIMUMNUM (SDNode *N, SelectionDAG &DAG) const
Expand fminimumnum/fmaximumnum into multiple comparison with selects.
expandFP_TO_INT_SAT (SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
expandRoundInexactToOdd (EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
expandFP_ROUND (SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
expandIS_FPCLASS (EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
expandCTPOP (SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
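A target without a native population-count instruction can call this generic expansion from its own lowering hook. The helper below is a hedged sketch; the function name and the assumption that it is reached via a Custom lowering action are illustrative.

```cpp
// Sketch: use the generic CTPOP expansion when no native popcount exists.
static SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG,
                          const TargetLowering &TLI) {
  if (SDValue Expanded = TLI.expandCTPOP(Op.getNode(), DAG))
    return Expanded;
  return SDValue(); // leave it to the generic legalizer
}
```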
expandVPCTPOP (SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
expandCTLZ (SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
expandVPCTLZ (SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
CTTZTableLookup (SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
expandCTTZ (SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
expandVPCTTZ (SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
expandVPCTTZElements (SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
expandVectorFindLastActive (SDNode *N, SelectionDAG &DAG) const
Expand VECTOR_FIND_LAST_ACTIVE nodes.
expandABS (SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
expandABD (SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
expandAVG (SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
expandBSWAP (SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
expandVPBSWAP (SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
expandBITREVERSE (SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
expandVPBITREVERSE (SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
scalarizeVectorLoad (LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
scalarizeVectorStore (StoreSDNode *ST, SelectionDAG &DAG) const
expandUnalignedLoad (LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
expandUnalignedStore (StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
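The usual pattern pairs this expansion with allowsMisalignedMemoryAccesses: keep the store if the target tolerates the misalignment, otherwise break it into smaller aligned stores. The helper name below is a placeholder and the snippet is only a sketch of that pattern.

```cpp
// Hedged sketch: expand a store only when the target rejects the alignment.
static SDValue lowerMisalignedStore(SDValue Op, SelectionDAG &DAG,
                                    const TargetLowering &TLI) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
  unsigned Fast = 0;
  if (TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                         ST->getAddressSpace(), ST->getAlign(),
                                         ST->getMemOperand()->getFlags(),
                                         &Fast))
    return Op; // the hardware handles this access directly
  return TLI.expandUnalignedStore(ST, DAG);
}
```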
IncrementMemoryAddress (SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
getVectorElementPointer (SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index, const SDNodeFlags PtrArithFlags=SDNodeFlags()) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base address of VecPtr.
getInboundsVectorElementPointer (SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base address of VecPtr.
getVectorSubVecPointer (SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index, const SDNodeFlags PtrArithFlags=SDNodeFlags()) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type VecVT starting at a base address of VecPtr.
expandIntMINMAX (SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
expandAddSubSat (SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
expandCMP (SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
expandShlSat (SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
expandFixedPointMul (SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
expandFixedPointDiv (unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
void
expandUADDSUBO (SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
void
expandSADDSUBO (SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
expandMULO (SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
void
forceExpandMultiply (SelectionDAG &DAG, const SDLoc &dl, bool Signed, SDValue &Lo, SDValue &Hi, SDValue LHS, SDValue RHS, SDValue HiLHS=SDValue(), SDValue HiRHS=SDValue()) const
Calculate the product twice the width of LHS and RHS.
void
forceExpandWideMUL (SelectionDAG &DAG, const SDLoc &dl, bool Signed, const SDValue LHS, const SDValue RHS, SDValue &Lo, SDValue &Hi) const
Calculate full product of LHS and RHS either via a libcall or through brute force expansion of the multiplication.
expandVecReduce (SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
expandVecReduceSeq (SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
expandREM (SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
expandVectorSplice (SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
expandVECTOR_COMPRESS (SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily, advance store position, before re-loading the final vector.
expandPartialReduceMLA (SDNode *Node, SelectionDAG &DAG) const
Expands PARTIAL_REDUCE_S/UMLA nodes to a series of simpler operations, consisting of zext/sext, extract_subvector, mul and add operations.
expandMultipleResultFPLibCall (SelectionDAG &DAG, RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl< SDValue > &Results, std::optional< unsigned > CallRetResNo={}) const
Expands a node with multiple results to an FP or vector libcall.
LegalizeSetCCCondCode (SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
virtual void
AdjustInstrPostInstrSelection (MachineInstr &MI, SDNode *Node) const
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
virtual bool
useLoadStackGuardNode (const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
virtual SDValue
emitStackGuardXorFP (SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual SDValue
LowerToTLSEmulatedModel (const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
virtual SDValue
expandIndirectJTBranch (const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
lowerCmpEqZeroToCtlzSrl (SDValue Op, SelectionDAG &DAG) const
virtual bool
isXAndYEqZeroPreferableToXAndYEqY (ISD::CondCode, EVT) const
expandVectorNaryOpBySplitting (SDNode *Node, SelectionDAG &DAG) const
scalarizeExtractedVectorLoad (EVT ResultVT, const SDLoc &DL, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad, SelectionDAG &DAG) const
Replace an extraction of a load with a narrowed load.
Public Member Functions inherited from llvm::TargetLoweringBase
TargetLoweringBase (const TargetMachine &TM, const TargetSubtargetInfo &STI)
NOTE: The TargetMachine owns TLOF.
TargetLoweringBase (const TargetLoweringBase &)=delete
operator= (const TargetLoweringBase &)=delete
isStrictFPEnabled () const
Return true if the target supports strict float operations.
virtual MVT
getPointerTy (const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
virtual MVT
getPointerMemTy (const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the data layout.
getFrameIndexTy (const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through the data layout.
getProgramPointerTy (const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through the data layout.
virtual MVT
getFenceOperandTy (const DataLayout &DL) const
Return the type for operands of fence.
getShiftAmountTy (EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual LLVM_READONLY LLT
getPreferredShiftAmountTy (LLT ShiftValueTy) const
Return the preferred type to use for a shift opcode, given the shifted amount type is ShiftValueTy.
virtual unsigned
getVectorIdxWidth (const DataLayout &DL) const
Returns the type to be used for the index operand vector operations.
getVectorIdxTy (const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR.
getVectorIdxLLT (const DataLayout &DL) const
Returns the type to be used for the index operand of: G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT, G_INSERT_SUBVECTOR, and G_EXTRACT_SUBVECTOR.
virtual MVT
getVPExplicitVectorLengthTy () const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB, etc.
virtual MachineMemOperand::Flags
getTargetMMOFlags (const Instruction &I) const
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand flags to them.
virtual MachineMemOperand::Flags
getTargetMMOFlags (const MemSDNode &Node) const
This callback is used to inspect load/store SDNode.
getLoadMemOperandFlags (const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
getStoreMemOperandFlags (const StoreInst &SI, const DataLayout &DL) const
getAtomicMemOperandFlags (const Instruction &AI, const DataLayout &DL) const
getVPIntrinsicMemOperandFlags (const VPIntrinsic &VPIntrin) const
virtual bool
isSelectSupported (SelectSupportKind) const
virtual bool
shouldExpandGetActiveLaneMask (EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in SelectionDAGBuilder.
virtual bool
shouldExpandGetVectorLength (EVT CountVT, unsigned VF, bool IsScalable) const
virtual bool
shouldExpandCttzElements (EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in SelectionDAGBuilder.
getBitWidthForCttzElements (Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vector elements.
virtual bool
shouldExpandVectorMatch (EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ and search size ‘SearchSize’ using generic code in SelectionDAGBuilder.
virtual bool
shouldReassociateReduction (unsigned RedOpc, EVT VT) const
virtual bool
reduceSelectOfFPConstantLoads (EVT CmpOpVT) const
Return true if it is profitable to convert a select of FP constants into a constant pool load whose address depends on the select condition.
virtual bool
hasMultipleConditionRegisters (EVT VT) const
Does the target have multiple (allocatable) condition registers that can be used to store the results of comparisons for use by selects and conditional branches.
hasExtractBitsInsn () const
Return true if the target has BitExtract instructions.
virtual TargetLoweringBase::LegalizeTypeAction
getPreferredVectorAction (MVT VT) const
Return the preferred vector type legalization action.
virtual bool
shouldExpandBuildVectorWithShuffles (EVT, unsigned DefinedValues) const
virtual bool
hasStandaloneRem (EVT VT) const
Return true if the target can handle a standalone remainder operation.
virtual bool
isFsqrtCheap (SDValue X, SelectionDAG &DAG) const
Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
int
getRecipEstimateSqrtEnabled (EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a square root of the given type based on the function's attributes.
int
getRecipEstimateDivEnabled (EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a division of the given type based on the function's attributes.
int
getSqrtRefinementSteps (EVT VT, MachineFunction &MF) const
Return the refinement step count for a square root of the given type based on the function's attributes.
int
getDivRefinementSteps (EVT VT, MachineFunction &MF) const
Return the refinement step count for a division of the given type based on the function's attributes.
isSlowDivBypassed () const
Returns true if the target has indicated that at least one type should be bypassed.
const DenseMap< unsigned int, unsigned int > &
getBypassSlowDivWidths () const
Returns map of slow types for division or remainder with corresponding fast types.
virtual bool
isVScaleKnownToBeAPowerOfTwo () const
Return true only if vscale must be a power of two.
isJumpExpensive () const
Return true if Flow Control is an expensive operation that should be avoided.
virtual CondMergingParams
getJumpConditionMergingParams (Instruction::BinaryOps, const Value *, const Value *) const
isPredictableSelectExpensive () const
Return true if selects are only cheaper than branches if the branch is unlikely to be predicted right.
virtual bool
fallBackToDAGISel (const Instruction &Inst) const
virtual bool
isLoadBitCastBeneficial (EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On architectures that don't natively support some vector loads efficiently, casting the load to a smaller vector of larger types and loading is more efficient, however, this can be undone by optimizations in dag combiner.
virtual bool
isStoreBitCastBeneficial (EVT StoreVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: (store (y (conv x)), y*)) -> (store x, (x*))
virtual bool
storeOfVectorConstantIsCheap (bool IsZero, EVT MemVT, unsigned NumElem, unsigned AddrSpace) const
Return true if it is expected to be cheaper to do a store of vector constant with the given size and type for the address space than to store the individual scalar element constants.
virtual bool
mergeStoresAfterLegalization (EVT MemVT) const
Allow store merging for the specified type after legalization in addition to before legalization.
virtual bool
canMergeStoresTo (unsigned AS, EVT MemVT, const MachineFunction &MF) const
Returns if it's reasonable to merge stores to MemVT size.
virtual bool
isCheapToSpeculateCttz (Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic cttz.
virtual bool
isCheapToSpeculateCtlz (Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual bool
isCtlzFast () const
Return true if ctlz instruction is fast.
virtual bool
isCtpopFast (EVT VT) const
Return true if ctpop instruction is fast.
virtual unsigned
getCustomCtpopCost (EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a custom CTPOP.
virtual bool
isEqualityCmpFoldedWithSignedCmp () const
Return true if instruction generated for equality comparison is folded with instruction generated for signed comparison.
virtual bool
preferZeroCompareBranch () const
Return true if the heuristic to prefer icmp eq zero should be used in code gen prepare.
virtual bool
isMultiStoresCheaperThanBitsMerge (EVT LTy, EVT HTy) const
Return true if it is cheaper to split the store of a merged int val from a pair of smaller values into multiple stores.
virtual bool
isMaskAndCmp0FoldingBeneficial (const Instruction &AndI) const
Return true if the target supports combining a chain like (%andResult = and %val1, #mask; %icmpResult = icmp %andResult, 0) into a single machine instruction of the form (cc = test %register, #mask).
virtual bool
areTwoSDNodeTargetMMOFlagsMergeable (const MemSDNode &NodeX, const MemSDNode &NodeY) const
Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
virtual bool
convertSetCCLogicToBitwiseLogic (EVT VT) const
Use bitwise logic to make pairs of compares more efficient.
virtual MVT
hasFastEqualityCompare (unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the given size.
virtual bool
hasAndNotCompare (SDValue Y) const
Return true if the target should transform:
  (X & Y) == Y ---> (~X & Y) == 0
  (X & Y) != Y ---> (~X & Y) != 0
virtual bool
hasAndNot (SDValue X) const
Return true if the target has a bitwise and-not operation: X = ~A & B. This can be used to simplify select or other instructions.
virtual bool
hasBitTest (SDValue X, SDValue Y) const
Return true if the target has a bit-test instruction:
  (X & (1 << Y)) ==/!= 0
This knowledge can be used to prevent breaking the pattern, or creating it if it could be recognized.
virtual bool
shouldFoldMaskToVariableShiftPair (SDValue X) const
There are two ways to clear extreme bits (either low or high):
  Mask: x & (-1 << y) (the instcombine canonical form)
  Shifts: x >> y << y
Return true if the variant with 2 variable shifts is preferred.
virtual bool
shouldFoldConstantShiftPairToMask (const SDNode *N) const
Return true if it is profitable to fold a pair of shifts into a mask.
virtual bool
shouldTransformSignedTruncationCheck (EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether the given truncation down into KeptBits would be truncating or not:
  (add x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
into its more traditional form:
  ((x << C) a>> C) dstcond x
Return true if we should transform.
virtual bool
shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd (SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern
  (X & (C l>>/<< Y)) ==/!= 0
return true if it should be transformed into:
  ((X <</l>> Y) & C) ==/!= 0
WARNING: if 'X' is a constant, the fold may deadlock!
virtual bool
optimizeFMulOrFDivAsShiftAddBitcast (SDNode *N, SDValue FPConst, SDValue IntPow2) const
virtual unsigned
preferedOpcodeForCmpEqPiecesOfOperand (EVT VT, unsigned ShiftOpc, bool MayTransformRotate, const APInt &ShiftOrRotateAmt, const std::optional< APInt > &AndMask) const
virtual bool
preferIncOfAddToSubOfNot (EVT VT) const
These two forms are equivalent:
  sub y, (xor x, -1)
  add (add x, 1), y
The variant with two add's is IR-canonical.
virtual bool
preferABDSToABSWithNSW (EVT VT) const
virtual bool
preferScalarizeSplat (SDNode *N) const
virtual bool
preferSextInRegOfTruncate (EVT TruncVT, EVT VT, EVT ExtVT) const
Return true if the target wants to use the optimization that turns ext(promotableInst1(...(promotableInstN(load)))) into promotedInst1(...(promotedInstN(ext(load)))).
virtual bool
canCombineStoreAndExtract (Type *VectorTy, Value *Idx, unsigned &Cost) const
Return true if the target can combine store(extractelement VectorTy, Idx).
virtual bool
shallExtractConstSplatVectorElementToStore (Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is known to be splat of constant.
virtual bool
shouldSplatInsEltVarIndex (EVT) const
Return true if inserting a scalar into a variable element of an undef vector is more efficiently handled by splatting the scalar instead.
virtual bool
enableAggressiveFMAFusion (EVT VT) const
Return true if target always benefits from combining into FMA for a given value type.
virtual bool
enableAggressiveFMAFusion (LLT Ty) const
Return true if target always benefits from combining into FMA for a given value type.
virtual MVT::SimpleValueType
getCmpLibcallReturnType () const
Return the ValueType for comparison libcalls.
getBooleanContents (bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in types wider than i1.
getBooleanContents (EVT Type) const
promoteTargetBoolean (SelectionDAG &DAG, SDValue Bool, EVT ValVT) const
Promote the given target boolean to a target boolean of the given type.
getSchedulingPreference () const
Return target scheduling preference.
virtual Sched::Preference
getSchedulingPreference (SDNode *) const
Some schedulers, e.g. hybrid, can switch to different scheduling heuristics for different nodes.
virtual const TargetRegisterClass *
getRegClassFor (MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool
requiresUniformRegister (MachineFunction &MF, const Value *) const
Allows target to decide about the register class of the specific value that is live outside the defining block.
virtual const TargetRegisterClass *
getRepRegClassFor (MVT VT) const
Return the 'representative' register class for the specified value type.
virtual uint8_t
getRepRegClassCostFor (MVT VT) const
Return the cost of the 'representative' register class for the specified value type.
virtual ShiftLegalizationStrategy
preferredShiftLegalizationStrategy (SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const
isTypeLegal (EVT VT) const
Return true if the target has native support for the specified value type.
getTypeConversion (LLVMContext &Context, EVT VT) const
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in order to type-legalize it.
getTypeAction (LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand').
getTypeAction (MVT VT) const
virtual EVT
getTypeToTransformTo (LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
getTypeToExpandTo (LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
getVectorTypeBreakdown (LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual unsigned
getVectorTypeBreakdownForCallingConv (LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scalars in some contexts.
virtual bool
getTgtMemIntrinsic (IntrinsicInfo &, const CallBase &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (touches memory).
virtual bool
isFPImmLegal (const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual bool
isShuffleMaskLegal (ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations, those with specific masks.
virtual bool
canOpTrap (unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual bool
isVectorClearMaskLegal (ArrayRef< int >, EVT) const
Similar to isShuffleMaskLegal.
virtual LegalizeAction
getCustomOperationAction (SDNode &Op) const
How to legalize this custom operation?
getOperationAction (unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
virtual bool
isSupportedFixedPointOperation (unsigned Op, EVT VT, unsigned Scale) const
Custom method defined by each target to indicate if an operation which may require a scale is supported natively by the target.
getFixedPointOperationAction (unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
getStrictFPOperationAction (unsigned Op, EVT VT) const
isOperationLegalOrCustom (unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering.
isOperationLegalOrPromote (unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
isOperationLegalOrCustomOrPromote (unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
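These legality queries are how generic expansion code decides between emitting a native node and building a fallback sequence. The sketch below shows that pattern for a signed-max; the helper name and the choice of ISD::SMAX are illustrative.

```cpp
// Sketch of the common legality-query pattern.
static SDValue emitSMax(SelectionDAG &DAG, const TargetLowering &TLI,
                        const SDLoc &DL, SDValue A, SDValue B) {
  EVT VT = A.getValueType();
  if (TLI.isOperationLegalOrCustom(ISD::SMAX, VT))
    return DAG.getNode(ISD::SMAX, DL, VT, A, B);
  // Fallback: smax(a, b) == select (setgt a, b), a, b
  EVT CCVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue Cmp = DAG.getSetCC(DL, CCVT, A, B, ISD::SETGT);
  return DAG.getSelect(DL, VT, Cmp, A, B);
}
```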
isOperationCustom (unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
virtual bool
areJTsAllowed (const Function *Fn) const
Return true if lowering to a jump table is allowed.
rangeFitsInWord (const APInt &Low, const APInt &High, const DataLayout &DL) const
Check whether the range [Low,High] fits in a machine word.
virtual bool
isSuitableForJumpTable (const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumCases cases, Range range of values.
virtual MVT
getPreferredSwitchConditionType (LLVMContext &Context, EVT ConditionVT) const
Returns preferred type for switch condition.
isSuitableForBitTests (const DenseMap< const BasicBlock *, unsigned int > &DestCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests unique destinations, Low and High as its lowest and highest case values, and expects NumCmps case value comparisons.
isOperationExpand (unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with custom lowering.
isOperationLegal (unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
getLoadExtAction (unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
isLoadExtLegal (unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
isLoadExtLegalOrCustom (unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal or custom on this target.
getAtomicLoadExtAction (unsigned ExtType, EVT ValVT, EVT MemVT) const
Same as getLoadExtAction, but for atomic loads.
isAtomicLoadExtLegal (unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified atomic load with extension is legal on this target.
getTruncStoreAction (EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
isTruncStoreLegal (EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
isTruncStoreLegalOrCustom (EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation has solution on this target.
virtual bool
canCombineTruncStore (EVT ValVT, EVT MemVT, bool LegalOnly) const
getIndexedLoadAction (unsigned IdxMode, MVT VT) const
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
isIndexedLoadLegal (unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
getIndexedStoreAction (unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
isIndexedStoreLegal (unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
getIndexedMaskedLoadAction (unsigned IdxMode, MVT VT) const
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
isIndexedMaskedLoadLegal (unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
getIndexedMaskedStoreAction (unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
isIndexedMaskedStoreLegal (unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
virtual bool
shouldExtendGSIndex (EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual bool
shouldRemoveExtendFromGSIndex (SDValue Extend, EVT DataVT) const
virtual bool
isLegalScaleForGatherScatter (uint64_t Scale, uint64_t ElemSize) const
getCondCodeAction (ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some other code sequence, or the target has a custom expander for it.
isCondCodeLegal (ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this target.
isCondCodeLegalOrCustom (ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom for a comparison of the specified types on this target.
getPartialReduceMLAAction (unsigned Opc, EVT AccVT, EVT InputVT) const
Return how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type InputVT should be treated.
isPartialReduceMLALegalOrCustom (unsigned Opc, EVT AccVT, EVT InputVT) const
Return true if a PARTIAL_REDUCE_U/SMLA node with the specified types is legal or custom for this target.
getTypeToPromoteTo (unsigned Op, MVT VT) const
If the action for this operation is to promote, this method returns the ValueType to promote to.
virtual EVT
getAsmOperandValueType (const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
getValueType (const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
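For illustration, a short sketch of mapping an IR type to the EVT used during selection; the surrounding helper is hypothetical, while getValueType and isTypeLegal are the documented interface.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Hypothetical helper: does this scalar IR type map to a simple, legal EVT?
static bool mapsToLegalScalar(const TargetLowering &TLI, const DataLayout &DL,
                              Type *Ty) {
  EVT VT = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
  return VT.isSimple() && TLI.isTypeLegal(VT);
}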
getMemValueType (const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
getSimpleValueType (const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
virtual Align
getByValTypeAlignment (Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parameter area.
getRegisterType (MVT VT) const
Return the type of registers that this ValueType will eventually require.
getRegisterType (LLVMContext &Context, EVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual unsigned
getNumRegisters (LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual MVT
getRegisterTypeForCallingConv (LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.
virtual unsigned
getNumRegistersForCallingConv (LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
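A sketch of querying the per-calling-convention register breakdown; the helper is hypothetical, and the splitting example in the comment is only indicative.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Hypothetical helper: how many registers, and of what register type, does a
// value of type VT occupy when passed under calling convention CC?
static unsigned numPartsForArg(const TargetLowering &TLI, LLVMContext &Ctx,
                               CallingConv::ID CC, EVT VT, MVT &PartVT) {
  PartVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, VT);
  // e.g. an i128 argument commonly splits into two i64 parts on 64-bit targets.
  return TLI.getNumRegistersForCallingConv(Ctx, CC, VT);
}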
virtual Align
getABIAlignmentForCallingConv (Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requirement of another type.
virtual bool
ShouldShrinkFPConstant (EVT) const
If true, then instruction selection should seek to shrink the FP constant of the specified type to a smaller type in order to save space and / or reduce runtime.
virtual bool
shouldRemoveRedundantExtend (SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant, and use x directly.
isPaddedAtMostSignificantBitsWhenStored (EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to memory and the type size isn't equal to the store size.
hasBigEndianPartOrdering (EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
hasTargetDAGCombine (ISD::NodeType NT) const
If true, the target has custom DAG combine transformations that it can perform for the specified node.
getGatherAllAliasesMaxDepth () const
virtual unsigned
getVaListSizeInBits (const DataLayout &DL) const
Returns the size of the platform's va_list object.
getMaxStoresPerMemset (bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
getMaxStoresPerMemcpy (bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
virtual unsigned
getMaxGluedStoresPerMemcpy () const
Get maximum # of store operations to be glued together.
getMaxExpandSizeMemcmp (bool OptSize) const
Get maximum # of load operations permitted for memcmp.
getMaxStoresPerMemmove (bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual bool
allowsMisalignedMemoryAccesses (LLT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
LLT handling variant.
allowsMemoryAccessForAlignment (LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unaligned memory access.
allowsMemoryAccessForAlignment (LLVMContext &Context, const DataLayout &DL, EVT VT, const MachineMemOperand &MMO, unsigned *Fast=nullptr) const
Return true if the memory access of this type is aligned or if the target allows this specific unaligned access for the given MachineMemOperand.
virtual bool
allowsMemoryAccess (LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and alignment.
allowsMemoryAccess (LLVMContext &Context, const DataLayout &DL, EVT VT, const MachineMemOperand &MMO, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given MachineMemOperand.
allowsMemoryAccess (LLVMContext &Context, const DataLayout &DL, LLT Ty, const MachineMemOperand &MMO, unsigned *Fast=nullptr) const
LLT handling variant.
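A sketch of guarding a widening transform with these hooks; the helper and the i64 choice are hypothetical, while allowsMemoryAccess and its Fast out-parameter are the documented interface.
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Hypothetical guard before merging adjacent accesses into one i64 access:
// the access must be allowed, and Fast lets the target veto slow-but-legal
// unaligned cases.
static bool wideAccessIsWorthwhile(const TargetLowering &TLI, LLVMContext &Ctx,
                                   const DataLayout &DL, Align A,
                                   unsigned AddrSpace) {
  unsigned Fast = 0;
  return TLI.allowsMemoryAccess(Ctx, DL, MVT::i64, AddrSpace, A,
                                MachineMemOperand::MONone, &Fast) &&
         Fast != 0;
}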
virtual LLT
getOptimalMemOpLLT (const MemOp &Op, const AttributeList &) const
LLT returning variant.
virtual bool
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
virtual unsigned
getMinimumJumpTableEntries () const
Return lower limit for number of blocks in a jump table.
getMinimumJumpTableDensity (bool OptForSize) const
Return lower limit of the density in a jump table.
getMaximumJumpTableSize () const
Return upper limit for number of entries in a jump table.
virtual bool
getMinimumBitTestCmps () const
Return the minimum number of comparisons required to generate a BitTest.
getStackPointerRegisterToSaveRestore () const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
virtual Register
getExceptionPointerRegister (const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an EH pad.
virtual Register
getExceptionSelectorRegister (const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a landing pad.
virtual bool
needsFixedCatchObjects () const
getMinStackArgumentAlignment () const
Return the minimum stack alignment of an argument.
getMinFunctionAlignment () const
Return the minimum function alignment.
getPrefFunctionAlignment () const
Return the preferred function alignment.
virtual Align
getPrefLoopAlignment (MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
virtual unsigned
getMaxPermittedBytesForAlignment (MachineBasicBlock *MBB) const
Return the maximum amount of bytes allowed to be emitted when padding for alignment.
virtual bool
alignLoopsWithOptSize () const
Should loops be aligned even when the function is marked OptSize (but not MinSize).
virtual Value *
getIRStackGuard (IRBuilderBase &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that location.
virtual void
insertSSPDeclarations (Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
virtual Value *
getSDagStackGuard (const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nullptr.
virtual bool
If this function returns true, stack protection checks should XOR the frame pointer (or whichever pointer is used to address locals) into the stack guard value before checking it.
Function *
getSSPStackGuardCheck (const Module &M) const
If the target has a standard stack protection check function that performs validation and error handling, returns the function.
virtual Value *
getSafeStackPointerLocation (IRBuilderBase &IRB) const
Returns the target-specific address of the unsafe stack pointer.
virtual bool
hasStackProbeSymbol (const MachineFunction &MF) const
Returns the name of the symbol used to emit stack probes or the empty string if not applicable.
virtual bool
hasInlineStackProbe (const MachineFunction &MF) const
virtual StringRef
getStackProbeSymbolName (const MachineFunction &MF) const
virtual bool
isFreeAddrSpaceCast (unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
virtual bool
shouldAlignPointerArgs (CallInst *, unsigned &, Align &) const
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is being passed.
virtual void
emitAtomicCmpXchgNoStoreLLBalance (IRBuilderBase &Builder) const
virtual bool
shouldSignExtendTypeInLibCall (Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
virtual bool
shouldExtendTypeInLibCall (EVT Type) const
Returns true if arguments should be extended in lib calls.
virtual AtomicExpansionKind
shouldExpandAtomicLoadInIR (LoadInst *LI) const
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
virtual AtomicExpansionKind
shouldCastAtomicLoadInIR (LoadInst *LI) const
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
virtual AtomicExpansionKind
shouldExpandAtomicStoreInIR (StoreInst *SI) const
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
virtual AtomicExpansionKind
shouldCastAtomicStoreInIR (StoreInst *SI) const
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into.
virtual AtomicExpansionKind
shouldExpandAtomicCmpXchgInIR (AtomicCmpXchgInst *AI) const
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
virtual AtomicExpansionKind
shouldExpandAtomicRMWInIR (AtomicRMWInst *RMW) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
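To illustrate the AtomicExpand hooks, a hypothetical override; the 64-bit cutoff is arbitrary and is not what the BPF backend (or any particular target) does.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical target: turn any atomicrmw wider than the native word into a
// cmpxchg loop in IR, and leave everything else to normal instruction
// selection.
class MyTargetLowering : public TargetLowering {
public:
  using TargetLowering::TargetLowering;

  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override {
    uint64_t Bits = RMW->getType()->getPrimitiveSizeInBits().getFixedValue();
    return Bits > 64 ? AtomicExpansionKind::CmpXChg
                     : AtomicExpansionKind::None;
  }
};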
virtual AtomicExpansionKind
shouldCastAtomicRMWIInIR (AtomicRMWInst *RMWI) const
Returns how the given atomicrmw should be cast by the IR-level AtomicExpand pass.
virtual LoadInst *
lowerIdempotentRMWIntoFencedLoad (AtomicRMWInst *RMWI) const
On some platforms, an AtomicRMW that never actually modifies the value (such as fetch_add of 0) can be turned into a fence followed by an atomic load.
virtual ISD::NodeType
getExtendForAtomicOps () const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND).
virtual ISD::NodeType
getExtendForAtomicCmpSwapArg () const
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND).
virtual ISD::NodeType
getExtendForAtomicRMWArg (unsigned Op) const
Returns how the platform's atomic rmw operations expect their input argument to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND).
virtual bool
shouldNormalizeToSelectSequence (LLVMContext &Context, EVT VT) const
Returns true if we should normalize select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely that it saves us from materializing N0 and N1 in an integer register.
virtual bool
isProfitableToCombineMinNumMaxNum (EVT VT) const
virtual bool
convertSelectOfConstantsToMath (EVT VT) const
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops with the condition value.
virtual bool
decomposeMulByConstant (LLVMContext &Context, EVT VT, SDValue C) const
Return true if it is profitable to transform an integer multiplication-by-constant into simpler operations like shifts and adds.
virtual bool
isMulAddWithConstProfitable (SDValue AddNode, SDValue ConstNode) const
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
virtual bool
shouldUseStrictFP_TO_INT (EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonicalizing the FP source value instead of converting all cases and then selecting based on value.
isBeneficialToExpandPowI (int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
virtual bool
getAddrModeArguments (const IntrinsicInst *, SmallVectorImpl< Value * > &, Type *&) const
CodeGenPrepare sinks address calculations into the same BB as Load/Store instructions reading the address.
virtual bool
addressingModeSupportsTLS (const GlobalValue &) const
Returns true if the targets addressing mode can target thread local storage (TLS).
virtual int64_t
getPreferredLargeGEPBaseOffset (int64_t MinOffset, int64_t MaxOffset) const
Return the preferred common base offset.
virtual bool
isLegalICmpImmediate (int64_t) const
Return true if the specified immediate is a legal icmp immediate, that is, the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
virtual bool
isLegalAddImmediate (int64_t) const
Return true if the specified immediate is a legal add immediate, that is, the target has add instructions which can add a register with the immediate without having to materialize the immediate into a register.
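A hypothetical example of these immediate-legality hooks; the signed 16-bit range is purely illustrative.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

// Hypothetical target whose compare and add instructions accept a signed
// 16-bit immediate; anything wider must be materialized into a register.
class MyTargetLowering : public TargetLowering {
public:
  using TargetLowering::TargetLowering;

  bool isLegalICmpImmediate(int64_t Imm) const override {
    return isInt<16>(Imm);
  }
  bool isLegalAddImmediate(int64_t Imm) const override {
    return isInt<16>(Imm);
  }
};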
virtual bool
isLegalAddScalableImmediate (int64_t) const
Return true if adding the specified scalable immediate is legal, that is the target has add instructions which can add a register with the immediate (multiplied by vscale) without having to materialize the immediate into a register.
virtual bool
isLegalStoreImmediate (int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
virtual Type *
shouldConvertSplatType (ShuffleVectorInst *SVI) const
Given a shuffle vector SVI representing a vector splat, return a new scalar type of size equal to SVI's scalar type if the new type is more profitable.
virtual bool
shouldConvertPhiType (Type *From, Type *To) const
Given a set of interconnected phis of type 'From' that are loaded/stored or bitcast to type 'To', return true if the set should be converted to 'To'.
virtual bool
isCommutativeBinOp (unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool
isBinOp (unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool
allowTruncateForTailCall (Type *FromTy, Type *ToTy) const
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail position.
virtual bool
isTruncateFree (LLT FromTy, LLT ToTy, LLVMContext &Ctx) const
virtual bool
isTruncateFree (SDValue Val, EVT VT2) const
Return true if truncating the specific node Val to type VT2 is free.
virtual bool
isProfitableToHoist (Instruction *I) const
isExtFree (const Instruction *I) const
Return true if the extension represented by I is free.
isExtLoad (const LoadInst *Load, const Instruction *Ext, const DataLayout &DL) const
Return true if Load and Ext can form an ExtLoad.
virtual bool
isZExtFree (LLT FromTy, LLT ToTy, LLVMContext &Ctx) const
virtual bool
isSExtCheaperThanZExt (EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual bool
signExtendConstant (const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
virtual bool
optimizeExtendOrTruncateConversion (Instruction *I, Loop *L, const TargetTransformInfo &TTI) const
Try to optimize extending or truncating conversion instructions (like zext, trunc, fptoui, uitofp) for the target.
virtual bool
hasPairedLoad (EVT, Align &) const
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType next to each other in memory.
virtual bool
Return true if the target has a vector blend instruction.
virtual unsigned
getMaxSupportedInterleaveFactor () const
Get the maximum supported factor for interleaved memory accesses.
virtual bool
lowerInterleavedLoad (Instruction *Load, Value *Mask, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor, const APInt &GapMask) const
Lower an interleaved load to target specific intrinsics.
virtual bool
lowerInterleavedStore (Instruction *Store, Value *Mask, ShuffleVectorInst *SVI, unsigned Factor, const APInt &GapMask) const
Lower an interleaved store to target specific intrinsics.
virtual bool
lowerDeinterleaveIntrinsicToLoad (Instruction *Load, Value *Mask, IntrinsicInst *DI) const
Lower a deinterleave intrinsic to a target specific load intrinsic.
virtual bool
lowerInterleaveIntrinsicToStore (Instruction *Store, Value *Mask, ArrayRef< Value * > InterleaveValues) const
Lower an interleave intrinsic to a target specific store intrinsic.
virtual bool
isFPExtFree (EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numbers are implicitly extended to double-precision).
virtual bool
isFPExtFoldable (const MachineInstr &MI, unsigned Opcode, LLT DestTy, LLT SrcTy) const
Return true if an fpext operation input to an Opcode operation is free (for instance, because half-precision floating-point numbers are implicitly extended to float-precision) for an FMA instruction.
virtual bool
isFPExtFoldable (const SelectionDAG &DAG, unsigned Opcode, EVT DestVT, EVT SrcVT) const
Return true if an fpext operation input to an Opcode operation is free (for instance, because half-precision floating-point numbers are implicitly extended to float-precision) for an FMA instruction.
virtual bool
isVectorLoadExtDesirable (SDValue ExtVal) const
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
virtual bool
isFNegFree (EVT VT) const
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with a bitwise operation.
virtual bool
isFAbsFree (EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with a bitwise operation.
virtual bool
isFMAFasterThanFMulAndFAdd (const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
virtual bool
isFMAFasterThanFMulAndFAdd (const MachineFunction &MF, LLT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
virtual bool
isFMAFasterThanFMulAndFAdd (const Function &F, Type *) const
IR version.
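A hypothetical override of the FMA profitability hook; restricting it to f32/f64 is illustrative only.
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

// Hypothetical target with a fused multiply-add unit for scalar f32/f64 only,
// so the combiner may contract fmul+fadd into FMA for just those types.
class MyTargetLowering : public TargetLowering {
public:
  using TargetLowering::TargetLowering;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override {
    return VT == MVT::f32 || VT == MVT::f64;
  }
};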
virtual bool
isFMADLegal (const MachineInstr &MI, LLT Ty) const
Returns true if MI can be combined with another instruction to form TargetOpcode::G_FMAD.
virtual bool
isFMADLegal (const SelectionDAG &DAG, const SDNode *N) const
Returns true if N can be combined with other nodes to form an ISD::FMAD.
virtual bool
generateFMAsInMachineCombiner (EVT VT, CodeGenOptLevel OptLevel) const
virtual bool
isNarrowingProfitable (SDNode *N, EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual bool
shouldFoldSelectWithIdentityConstant (unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const
Return true if pulling a binary operation into a select with an identity constant is profitable.
virtual bool
isExtractSubvectorCheap (EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with this index.
virtual bool
shouldScalarizeBinop (SDValue VecOp) const
Try to convert an extract element of a vector binary operation into an extract element followed by a scalar operation.
virtual bool
isExtractVecEltCheap (EVT VT, unsigned Index) const
Return true if extraction of a scalar element from the given vector type at the given index is cheap.
virtual bool
shouldFormOverflowOp (unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
virtual bool
shouldOptimizeMulOverflowWithZeroHighBits (LLVMContext &Context, EVT VT) const
virtual bool
aggressivelyPreferBuildVectorSources (EVT VecVT) const
virtual bool
shouldConsiderGEPOffsetSplit () const
virtual bool
shouldAvoidTransformToShift (EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool
shouldFoldSelectWithSingleBitTest (EVT VT, const APInt &AndMask) const
virtual bool
shouldKeepZExtForFP16Conv () const
Does this target require the clearing of high-order bits in a register passed to the fp16 to fp conversion library function.
virtual bool
shouldConvertFpToSat (unsigned Op, EVT FPVT, EVT VT) const
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) saturation patterns.
virtual bool
preferSelectsOverBooleanArithmetic (EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool
shouldPreservePtrArith (const Function &F, EVT PtrVT) const
True if the target has a particular way of handling pointer arithmetic semantics for pointers with the given value type.
virtual bool
canTransformPtrArithOutOfBounds (const Function &F, EVT PtrVT) const
True if the target allows transformations of in-bounds pointer arithmetic that cause out-of-bounds intermediate results.
virtual bool
isComplexDeinterleavingSupported () const
Does this target support complex deinterleaving.
virtual bool
isComplexDeinterleavingOperationSupported (ComplexDeinterleavingOperation Operation, Type *Ty) const
Does this target support complex deinterleaving with the given operation and type.
virtual unsigned
getPreferredFPToIntOpcode (unsigned Op, EVT FromVT, EVT ToVT) const
virtual Value *
createComplexDeinterleavingIR (IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const
Create the IR node for the given complex deinterleaving operation.
const RTLIB::RuntimeLibcallsInfo &
getRuntimeLibcallsInfo () const
void
setLibcallImpl (RTLIB::Libcall Call, RTLIB::LibcallImpl Impl)
RTLIB::LibcallImpl
getLibcallImpl (RTLIB::Libcall Call) const
Get the libcall impl routine name for the specified libcall.
getLibcallName (RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
RTLIB::LibcallImpl
getSupportedLibcallImpl (StringRef FuncName) const
Check if this is a valid libcall for the current module; otherwise return RTLIB::Unsupported.
getSoftFloatCmpLibcallPredicate (RTLIB::LibcallImpl Call) const
Get the comparison predicate that's to be used to test the result of the comparison libcall against zero.
getLibcallImplCallingConv (RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall implementation.
getLibcallCallingConv (RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual void
finalizeLowering (MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
virtual bool
shouldMergeStoreOfLoadsOverCall (EVT, EVT) const
Returns true if it's profitable to allow merging store of loads when there are functions calls between the load and the store.
virtual bool
shouldLocalize (const MachineInstr &MI, const TargetTransformInfo *TTI) const
Check whether or not MI needs to be moved close to its uses.
int
InstructionOpcodeToISD (unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
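A sketch in the style of the cost-model code that uses this mapping; the helper name is hypothetical.
#include <cassert>

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Hypothetical query: translate an IR opcode (e.g. Instruction::Mul) to its
// ISD node and ask whether the target handles it natively for the given type.
static bool irOpIsNativelySupported(const TargetLowering &TLI,
                                    unsigned IROpcode, EVT VT) {
  int ISDOpc = TLI.InstructionOpcodeToISD(IROpcode);
  assert(ISDOpc && "unexpected IR opcode");
  return TLI.isOperationLegalOrCustom(ISDOpc, VT);
}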
int
IntrinsicIDToISD (Intrinsic::ID ID) const
Get the ISD node that corresponds to the Intrinsic ID.
getMaxAtomicSizeInBitsSupported () const
Returns the maximum atomic operation size (in bits) supported by the backend.
getMaxDivRemBitWidthSupported () const
Returns the size in bits of the maximum div/rem the backend supports.
getMaxLargeFPConvertBitWidthSupported () const
Returns the size in bits of the maximum fp to/from int conversion the backend supports.
getMinCmpXchgSizeInBits () const
Returns the size of the smallest cmpxchg or ll/sc instruction the backend supports.
supportsUnalignedAtomics () const
Whether the target supports unaligned atomic operations.
virtual bool
shouldInsertFencesForAtomic (const Instruction *I) const
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
virtual AtomicOrdering
atomicOperationOrderAfterFenceSplit (const Instruction *I) const
virtual bool
shouldInsertTrailingFenceForAtomicStore (const Instruction *I) const
Whether AtomicExpandPass should automatically insert a trailing fence without reducing the ordering for this atomic.
virtual Value *
emitLoadLinked (IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
virtual Value *
emitStoreConditional (IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const
Perform a store-conditional operation to Addr.
virtual Value *
emitMaskedAtomicRMWIntrinsic (IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const
Perform a masked atomicrmw using a target-specific intrinsic.
virtual void
emitExpandAtomicRMW (AtomicRMWInst *AI) const
Perform an atomicrmw expansion in a target-specific way.
virtual void
emitExpandAtomicStore (StoreInst *SI) const
Perform an atomic store in a target-specific way.
virtual void
emitExpandAtomicLoad (LoadInst *LI) const
Perform an atomic load in a target-specific way.
virtual void
emitExpandAtomicCmpXchg (AtomicCmpXchgInst *CI) const
Perform a cmpxchg expansion using a target-specific method.
virtual void
emitBitTestAtomicRMWIntrinsic (AtomicRMWInst *AI) const
Perform a bit test atomicrmw using a target-specific intrinsic.
virtual void
emitCmpArithAtomicRMWIntrinsic (AtomicRMWInst *AI) const
Perform an atomicrmw whose result is only used by a comparison, using a target-specific intrinsic.
virtual Value *
emitMaskedAtomicCmpXchgIntrinsic (IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const
Perform a masked cmpxchg using a target-specific intrinsic.
virtual MachineInstr *
EmitKCFICheck (MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const
virtual Instruction *
emitLeadingFence (IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
virtual Instruction *
emitTrailingFence (IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Additional Inherited Members
Public Types inherited from llvm::TargetLowering
enum
ConstraintType {
C_Register, C_RegisterClass, C_Memory, C_Address,
C_Immediate, C_Other, C_Unknown
}
enum
ConstraintWeight {
CW_Invalid = -1 , CW_Okay = 0 , CW_Good = 1 , CW_Better = 2 ,
CW_Best = 3 , CW_SpecificReg = CW_Okay , CW_Register = CW_Good , CW_Memory = CW_Better ,
CW_Constant = CW_Best , CW_Default = CW_Okay
}
using
AsmOperandInfoVector = std::vector<AsmOperandInfo>
using
ConstraintPair = std::pair<StringRef, TargetLowering::ConstraintType>
using
ConstraintGroup = SmallVector<ConstraintPair>
Public Types inherited from llvm::TargetLoweringBase
enum
LegalizeAction : uint8_t {
Legal, Promote, Expand, LibCall,
Custom
}
This enum indicates whether operations are valid for a target, and if not, what action should be used to make them valid. More...
enum
LegalizeTypeAction : uint8_t {
TypeLegal, TypePromoteInteger, TypeExpandInteger, TypeSoftenFloat,
TypeExpandFloat, TypeScalarizeVector, TypeSplitVector, TypeWidenVector,
TypePromoteFloat, TypeSoftPromoteHalf, TypeScalarizeScalableVector
}
This enum indicates whether types are legal for a target, and if not, what action should be used to make them valid. More...
enum
BooleanContent { UndefinedBooleanContent, ZeroOrOneBooleanContent, ZeroOrNegativeOneBooleanContent }
Enum that describes how the target represents true/false values. More...
enum
SelectSupportKind { ScalarValSelect, ScalarCondVectorVal, VectorMaskSelect }
Enum that describes what type of support for selects the target has. More...
enum class
AtomicExpansionKind {
None, CastToInteger, LLSC, LLOnly,
CmpXChg, MaskedIntrinsic, BitTestIntrinsic, CmpArithIntrinsic,
Expand, CustomExpand, NotAtomic
}
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all. More...
enum class
MulExpansionKind { Always, OnlyLegalOrCustom }
Enum that specifies when a multiplication should be expanded. More...
enum class
NegatibleCost { Cheaper = 0 , Neutral = 1 , Expensive = 2 }
Enum that specifies when a float negation is beneficial. More...
enum
AndOrSETCCFoldKind : uint8_t { None = 0 , AddAnd = 1 , NotAnd = 2 , ABS = 4 }
Enum of different potentially desirable ways to fold (and/or (setcc ...), (setcc ...)). More...
enum
ReciprocalEstimate : int { Unspecified = -1 , Disabled = 0 , Enabled = 1 }
Reciprocal estimate status values used by the functions below. More...
enum class
ShiftLegalizationStrategy { ExpandToParts, ExpandThroughStack, LowerToLibcall }
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recursion depth (how many expansions are needed). More...
using
LegalizeKind = std::pair<LegalizeTypeAction, EVT>
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
using
ArgListTy = std::vector<ArgListEntry>
Static Public Member Functions inherited from llvm::TargetLoweringBase
static ISD::NodeType
getExtendForContent (BooleanContent Content)
static StringRef
getLibcallImplName (RTLIB::LibcallImpl Call)
Get the libcall routine name for the specified libcall implementation.
Protected Member Functions inherited from llvm::TargetLoweringBase
void
initActions ()
Initialize all of the actions to default values.
Value *
getDefaultSafeStackPointerLocation (IRBuilderBase &IRB, bool UseTLS) const
void
setBooleanContents (BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
void
setBooleanContents (BooleanContent IntTy, BooleanContent FloatTy)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
void
setBooleanVectorContents (BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void
setSchedulingPreference (Sched::Preference Pref)
Specify the target scheduling preference.
void
setMinimumJumpTableEntries (unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void
setMaximumJumpTableSize (unsigned)
Indicate the maximum number of entries in jump tables.
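A hypothetical constructor fragment showing these jump-table knobs; the thresholds are illustrative and not taken from any in-tree target.
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

// Hypothetical target lowering: only build jump tables for switches with at
// least five cases, and never emit a table with more than 1024 entries.
class MyTargetLowering : public TargetLowering {
public:
  MyTargetLowering(const TargetMachine &TM, const TargetSubtargetInfo &STI)
      : TargetLowering(TM, STI) {
    setMinimumJumpTableEntries(5);
    setMaximumJumpTableSize(1024);
  }
};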
void
setMinimumBitTestCmps (unsigned Val)
Set the minimum number of comparisons required to generate a BitTest.
void
setStackPointerRegisterToSaveRestore (Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void
setHasExtractBitsInsn (bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
void
setJumpIsExpensive (bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequences that increase the amount of flow control.
void
addBypassSlowDiv (unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
void
addRegisterClass (MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual std::pair< const TargetRegisterClass *, uint8_t >
findRepresentativeClass (const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and its associated "cost".
void
computeRegisterProperties (const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
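A hypothetical constructor fragment for the register-class setup; GPR64RegClass and the MyTarget namespace stand in for a TableGen-generated class and do not refer to any real target.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"

using namespace llvm;

// Hypothetical target: declare which value types live in which register
// classes, then let computeRegisterProperties derive legal types, promotion
// maps, and representative classes from that.
class MyTargetLowering : public TargetLowering {
public:
  MyTargetLowering(const TargetMachine &TM, const TargetSubtargetInfo &STI)
      : TargetLowering(TM, STI) {
    // GPR64RegClass is a hypothetical TableGen-generated register class.
    addRegisterClass(MVT::i64, &MyTarget::GPR64RegClass);
    computeRegisterProperties(STI.getRegisterInfo());
  }
};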
void
setOperationAction (unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
void
setOperationAction (ArrayRef< unsigned > Ops, MVT VT, LegalizeAction Action)
void
setOperationAction (ArrayRef< unsigned > Ops, ArrayRef< MVT > VTs, LegalizeAction Action)
void
setLoadExtAction (unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
void
setLoadExtAction (ArrayRef< unsigned > ExtTypes, MVT ValVT, MVT MemVT, LegalizeAction Action)
void
setLoadExtAction (ArrayRef< unsigned > ExtTypes, MVT ValVT, ArrayRef< MVT > MemVTs, LegalizeAction Action)
void
setAtomicLoadExtAction (unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
void
setAtomicLoadExtAction (ArrayRef< unsigned > ExtTypes, MVT ValVT, MVT MemVT, LegalizeAction Action)
void
setAtomicLoadExtAction (ArrayRef< unsigned > ExtTypes, MVT ValVT, ArrayRef< MVT > MemVTs, LegalizeAction Action)
void
setTruncStoreAction (MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
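A hypothetical constructor fragment showing the usual action-table setup; the specific choices are illustrative and are not the BPF backend's actual configuration.
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

class MyTargetLowering : public TargetLowering {
public:
  MyTargetLowering(const TargetMachine &TM, const TargetSubtargetInfo &STI)
      : TargetLowering(TM, STI) {
    // No native divide: lower i64 SDIV to a runtime library call.
    setOperationAction(ISD::SDIV, MVT::i64, LibCall);
    // This target lowers BR_CC itself in LowerOperation.
    setOperationAction(ISD::BR_CC, MVT::i64, Custom);
    // No sub-word sign-extending loads: the legalizer expands them.
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i8, Expand);
    // Likewise, truncating an i64 store down to i16 must be broken up.
    setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  }
};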
void
setIndexedLoadAction (ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate what to do about it.
void
setIndexedLoadAction (ArrayRef< unsigned > IdxModes, ArrayRef< MVT > VTs, LegalizeAction Action)
void
setIndexedStoreAction (ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate what to do about it.
void
setIndexedStoreAction (ArrayRef< unsigned > IdxModes, ArrayRef< MVT > VTs, LegalizeAction Action)
void
setIndexedMaskedLoadAction (unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked load does or does not work with the specified type and indicate what to do about it.
void
setIndexedMaskedStoreAction (unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked store does or does not work with the specified type and indicate what to do about it.
void
setCondCodeAction (ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to do about it.
void
setCondCodeAction (ArrayRef< ISD::CondCode > CCs, ArrayRef< MVT > VTs, LegalizeAction Action)
void
setPartialReduceMLAAction (unsigned Opc, MVT AccVT, MVT InputVT, LegalizeAction Action)
Indicate how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type InputVT should be treated by the target.
void
setPartialReduceMLAAction (ArrayRef< unsigned > Opcodes, MVT AccVT, MVT InputVT, LegalizeAction Action)
void
AddPromotedToType (unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/fp until it can find one that works.
void
setOperationPromotedToType (unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
void
setOperationPromotedToType (ArrayRef< unsigned > Ops, MVT OrigVT, MVT DestVT)
void
setTargetDAGCombine (ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
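A hypothetical pairing of this registration with PerformDAGCombine; returning an empty SDValue means "no change", and a real combine would construct and return a replacement node instead.
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

class MyTargetLowering : public TargetLowering {
public:
  MyTargetLowering(const TargetMachine &TM, const TargetSubtargetInfo &STI)
      : TargetLowering(TM, STI) {
    // Ask the generic DAG combiner to hand AND nodes to this target.
    setTargetDAGCombine(ISD::AND);
  }

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override {
    if (N->getOpcode() == ISD::AND) {
      // Inspect N and, if profitable, build a replacement via DCI.DAG here.
    }
    return SDValue();
  }
};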
void
setMinFunctionAlignment (Align Alignment)
Set the target's minimum function alignment.
void
setPrefFunctionAlignment (Align Alignment)
Set the target's preferred function alignment.
void
setPrefLoopAlignment (Align Alignment)
Set the target's preferred loop alignment.
void
setMaxBytesForAlignment (unsigned MaxBytes)
void
setMinStackArgumentAlignment (Align Alignment)
Set the minimum stack alignment of an argument.
void
setMaxAtomicSizeInBitsSupported (unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void
setMaxDivRemBitWidthSupported (unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
void
setMaxLargeFPConvertBitWidthSupported (unsigned SizeInBits)
Set the size in bits of the maximum fp to/from int conversion the backend supports.
void
setMinCmpXchgSizeInBits (unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void
setSupportsUnalignedAtomics (bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
virtual bool
isExtFreeImpl (const Instruction *I) const
Return true if the extension represented by I is free.
isLegalRC (const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
emitPatchPoint (MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that is recognized by PrologEpilogInserter.
Protected Attributes inherited from llvm::TargetLoweringBase
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more preferable chain.
Specify maximum number of store instructions per memset call.
Likewise for functions with the OptSize attribute.
Specify maximum number of store instructions per memcpy call.
Likewise for functions with the OptSize attribute.
Specify max number of store instructions to glue in inlined memcpy.
Specify maximum number of load instructions per memcmp call.
Likewise for functions with the OptSize attribute.
Specify maximum number of store instructions per memmove call.
Likewise for functions with the OptSize attribute.
Tells the code generator that select is more expensive than a branch if the branch is usually predicted right.