LLVM: llvm::AArch64InstrInfo Class Reference

#include "Target/AArch64/AArch64InstrInfo.h"

Public Member Functions
AArch64InstrInfo (const AArch64Subtarget &STI)
const AArch64RegisterInfo & getRegisterInfo () const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
unsigned getInstSizeInBytes (const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
bool isAsCheapAsAMove (const MachineInstr &MI) const override
bool isCoalescableExtInstr (const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
bool areMemAccessesTriviallyDisjoint (const MachineInstr &MIa, const MachineInstr &MIb) const override
Register isLoadFromStackSlot (const MachineInstr &MI, int &FrameIndex) const override
Register isStoreToStackSlot (const MachineInstr &MI, int &FrameIndex) const override
Register isStoreToStackSlotPostFE (const MachineInstr &MI, int &FrameIndex) const override
Check for post-frame ptr elimination stack locations as well.
Register isLoadFromStackSlotPostFE (const MachineInstr &MI, int &FrameIndex) const override
Check for post-frame ptr elimination stack locations as well.
bool isCandidateToMergeOrPair (const MachineInstr &MI) const
Return true if this is a load/store that can be potentially paired/merged.
std::optional< ExtAddrMode > getAddrModeFromMemoryOp (const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override
bool canFoldIntoAddrMode (const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
MachineInstr * emitLdStWithAddr (MachineInstr &MemI, const ExtAddrMode &AM) const override
bool getMemOperandsWithOffsetWidth (const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
bool getMemOperandWithOffsetWidth (const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, TypeSize &Width, const TargetRegisterInfo *TRI) const
If OffsetIsScalable is set to 'true', the offset is scaled by vscale.
MachineOperand & getMemOpBaseRegImmOfsOffsetOperand (MachineInstr &LdSt) const
Return the immediate offset of the base register in a load/store LdSt.
bool shouldClusterMemOps (ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
Detect opportunities for ldp/stp formation.
void copyPhysRegTuple (MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, unsigned Opcode, llvm::ArrayRef< unsigned > Indices) const
void copyGPRRegTuple (MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, unsigned Opcode, unsigned ZeroReg, llvm::ArrayRef< unsigned > Indices) const
void copyPhysReg (MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
void storeRegToStackSlot (MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void loadRegFromStackSlot (MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isSubregFoldable () const override
MachineInstr * foldMemoryOperandImpl (MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isBranchOffsetInRange (unsigned BranchOpc, int64_t BrOffset) const override
MachineBasicBlock * getBranchDestBlock (const MachineInstr &MI) const override
void insertIndirectBranch (MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool analyzeBranch (MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
bool analyzeBranchPredicate (MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify) const override
unsigned removeBranch (MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
unsigned insertBranch (MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining (MachineBasicBlock *LoopBB) const override
bool reverseBranchCondition (SmallVectorImpl< MachineOperand > &Cond) const override
bool canInsertSelect (const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
void insertSelect (MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
void insertNoop (MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
MCInst getNop () const override
bool isSchedulingBoundary (const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
bool analyzeCompare (const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2, and the value it compares against in CmpValue.
bool optimizeCompareInstr (MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction supplying the argument to the comparison into one that sets the zero bit in the flags register.
bool optimizeCondBranch (MachineInstr &MI) const override
Replace csincr-branch sequence by simple conditional branch.
CombinerObjective getCombinerObjective (unsigned Pattern) const override
bool isThroughputPattern (unsigned Pattern) const override
Return true when a code sequence can improve throughput.
MachineBasicBlock::iterator probedStackAlloc (MachineBasicBlock::iterator MBBI, Register TargetReg, bool FrameSetup) const
Decrement the SP, issuing probes along the way. TargetReg is the new top of the stack.
virtual MachineInstr * foldMemoryOperandImpl (MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
Static Public Member Functions
static bool isGPRZero (const MachineInstr &MI)
Does this instruction set its full destination register to zero?
static bool isGPRCopy (const MachineInstr &MI)
Does this instruction rename a GPR without modifying bits?
static bool isFPRCopy (const MachineInstr &MI)
Does this instruction rename an FPR without modifying bits?
static bool isLdStPairSuppressed (const MachineInstr &MI)
Return true if pairing the given load or store is hinted to be unprofitable.
static bool isStridedAccess (const MachineInstr &MI)
Return true if the given load or store is a strided memory access.
static bool hasUnscaledLdStOffset (unsigned Opc)
Return true if it has an unscaled load/store offset.
static bool hasUnscaledLdStOffset (MachineInstr &MI)
static std::optional< unsigned > getUnscaledLdSt (unsigned Opc)
Returns the unscaled load/store for the scaled load/store opcode, if there is a corresponding unscaled variant available.
static int getMemScale (unsigned Opc)
Scaling factor for (scaled or unscaled) load or store.
static int getMemScale (const MachineInstr &MI)
static bool isPreLd (const MachineInstr &MI)
Returns whether the instruction is a pre-indexed load.
static bool isPreSt (const MachineInstr &MI)
Returns whether the instruction is a pre-indexed store.
static bool isPreLdSt (const MachineInstr &MI)
Returns whether the instruction is a pre-indexed load/store.
static bool isPairedLdSt (const MachineInstr &MI)
Returns whether the instruction is a paired load/store.
static const MachineOperand & getLdStBaseOp (const MachineInstr &MI)
Returns the base register operand of a load/store.
static const MachineOperand & getLdStOffsetOp (const MachineInstr &MI)
Returns the immediate offset operand of a load/store.
static bool isFpOrNEON (Register Reg)
Returns whether the physical register is FP or NEON.
static const MachineOperand & getLdStAmountOp (const MachineInstr &MI)
Returns the shift amount operand of a load/store.
static bool isFpOrNEON (const MachineInstr &MI)
Returns whether the instruction is FP or NEON.
static bool isHForm (const MachineInstr &MI)
Returns whether the instruction is in H form (16 bit operands)
static bool isQForm (const MachineInstr &MI)
Returns whether the instruction is in Q form (128 bit operands)
static bool hasBTISemantics (const MachineInstr &MI)
Returns whether the instruction can be compatible with non-zero BTYPE.
static unsigned getLoadStoreImmIdx (unsigned Opc)
Returns the index for the immediate for a given instruction.
static bool isPairableLdStInst (const MachineInstr &MI)
Return true if the given load or store may be paired with another.
static bool isTailCallReturnInst (const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static unsigned convertToFlagSettingOpc (unsigned Opc)
Return the opcode that set flags when possible.
static void suppressLdStPair (MachineInstr &MI)
Hint that pairing the given load or store is unprofitable.
static bool getMemOpInfo (unsigned Opcode, TypeSize &Scale, TypeSize &Width, int64_t &MinOffset, int64_t &MaxOffset)
Returns true if opcode Opc is a memory operation.

Definition at line 180 of file AArch64InstrInfo.h.

analyzeBranch()

Definition at line 395 of file AArch64InstrInfo.cpp.

References assert(), Cond, llvm::MachineInstr::eraseFromParent(), getBranchDestBlock(), llvm::MachineOperand::getMBB(), llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), I, llvm::isCondBranchOpcode(), llvm::isIndirectBranchOpcode(), llvm::isUncondBranchOpcode(), MBB, parseCondBranch(), and TBB.

Referenced by analyzeLoopForPipelining().

analyzeBranchPredicate()

bool AArch64InstrInfo::analyzeBranchPredicate ( MachineBasicBlock & MBB, MachineBranchPredicate & MBP, bool AllowModify ) const override

analyzeCompare()

analyzeLoopForPipelining()

Definition at line 11493 of file AArch64InstrInfo.cpp.

References analyzeBranch(), assert(), Cond, llvm::MachineBasicBlock::getFirstTerminator(), getIndVarInfo(), llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), llvm::MachineOperand::getReg(), getRegisterInfo(), isDefinedOutside(), MI, llvm::reverse(), reverseBranchCondition(), TBB, and TRI.

areMemAccessesTriviallyDisjoint()

Definition at line 1377 of file AArch64InstrInfo.cpp.

References assert(), llvm::details::FixedOrScalableQuantity< LeafTy, ValueTy >::getKnownMinValue(), getMemOperandWithOffsetWidth(), getRegisterInfo(), llvm::MachineInstr::hasOrderedMemoryRef(), llvm::MachineInstr::hasUnmodeledSideEffects(), llvm::MachineOperand::isIdenticalTo(), llvm::details::FixedOrScalableQuantity< LeafTy, ValueTy >::isScalable(), llvm::MachineInstr::mayLoadOrStore(), and TRI.

canFoldIntoAddrMode()

Definition at line 3316 of file AArch64InstrInfo.cpp.

References llvm::ExtAddrMode::BaseReg, llvm::ExtAddrMode::Basic, DefMI, llvm::ExtAddrMode::Displacement, llvm::ExtAddrMode::Form, llvm::AArch64_AM::getArithExtendType(), llvm::AArch64_AM::getArithShiftValue(), llvm::MachineFunction::getFunction(), llvm::getImm(), llvm::MachineOperand::getImm(), llvm::MachineInstr::getMF(), llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), llvm::MachineOperand::getReg(), llvm::MachineFunction::getRegInfo(), llvm::AArch64_AM::getShiftType(), llvm::AArch64_AM::getShiftValue(), llvm::Function::hasOptSize(), llvm::MachineOperand::isReg(), llvm::Register::isVirtual(), llvm::AArch64_AM::LSL, MRI, llvm::ExtAddrMode::Scale, llvm::ExtAddrMode::ScaledReg, llvm::ExtAddrMode::SExtScaledReg, llvm::AArch64_AM::SXTW, llvm::AArch64_AM::UXTW, and llvm::ExtAddrMode::ZExtScaledReg.

canInsertSelect()

convertToFlagSettingOpc()

Return the opcode that set flags when possible.

The caller is responsible for ensuring the opc has a flag setting equivalent.

Definition at line 3104 of file AArch64InstrInfo.cpp.

References llvm_unreachable, and Opc.

copyGPRRegTuple()

Definition at line 5308 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addReg(), AddSubReg(), assert(), llvm::BuildMI(), llvm::RegState::Define, DL, llvm::get(), llvm::getKillRegState(), getRegisterInfo(), I, MBB, llvm::ArrayRef< T >::size(), SubReg, and TRI.

Referenced by copyPhysReg().

copyPhysReg()

Definition at line 5333 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addDef(), llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addReg(), llvm::Register::asMCReg(), assert(), llvm::BuildMI(), contains(), copyGPRRegTuple(), copyPhysRegTuple(), llvm::RegState::Define, DL, llvm::errs(), llvm::get(), llvm::getKillRegState(), llvm::AArch64_AM::getShifterImm(), I, llvm::RegState::Implicit, llvm::MCRegister::isValid(), llvm_unreachable, llvm::AArch64_AM::LSL, MBB, and llvm::RegState::Undef.

copyPhysRegTuple()

Definition at line 5281 of file AArch64InstrInfo.cpp.

References AddSubReg(), assert(), llvm::BuildMI(), llvm::RegState::Define, DL, forwardCopyWillClobberTuple(), llvm::get(), llvm::getKillRegState(), getRegisterInfo(), I, MBB, llvm::ArrayRef< T >::size(), SubReg, and TRI.

Referenced by copyPhysReg().

emitLdStWithAddr()

Definition at line 3994 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addReg(), assert(), B(), llvm::ExtAddrMode::BaseReg, llvm::ExtAddrMode::Basic, llvm::BuildMI(), llvm::RegState::Define, llvm::ExtAddrMode::Displacement, DL, llvm::ExtAddrMode::Form, llvm::get(), llvm::MachineInstr::getDebugLoc(), llvm::MachineInstr::getFlags(), llvm::MachineInstr::getMF(), llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), llvm::MachineInstr::getParent(), llvm::MachineOperand::getReg(), llvm::MachineFunction::getRegInfo(), llvm::TargetRegisterClass::hasSuperClassEq(), llvm::isInt(), llvm_unreachable, llvm::MachineInstr::mayLoad(), MBB, llvm::MachineInstr::memoperands(), MRI, offsetExtendOpcode(), regOffsetOpcode(), llvm::ExtAddrMode::Scale, scaledOffsetOpcode(), llvm::ExtAddrMode::ScaledReg, llvm::MachineInstrBuilder::setMemRefs(), llvm::MachineInstrBuilder::setMIFlags(), llvm::ExtAddrMode::SExtScaledReg, unscaledOffsetOpcode(), and llvm::ExtAddrMode::ZExtScaledReg.

foldMemoryOperandImpl() [1/2]

Definition at line 6608 of file AArch64InstrInfo.cpp.

References AbstractManglingParser< Derived, Alloc >::Ops, assert(), llvm::MachineRegisterInfo::constrainRegClass(), llvm::MachineInstr::getOperand(), llvm::MachineOperand::getReg(), getRegClass(), llvm::MachineFunction::getRegInfo(), llvm::TargetSubtargetInfo::getRegisterInfo(), llvm::MachineOperand::getSubReg(), llvm::MachineFunction::getSubtarget(), llvm::MachineOperand::isKill(), llvm::MachineOperand::isUndef(), llvm::Register::isVirtual(), llvm::Register::isVirtualRegister(), loadRegFromStackSlot(), MBB, MI, MRI, Register, llvm::MachineOperand::setIsUndef(), llvm::MachineOperand::setSubReg(), storeRegToStackSlot(), and TRI.

foldMemoryOperandImpl() [2/2]

Target-dependent implementation for foldMemoryOperand.

Target-independent code in foldMemoryOperand will take care of adding a MachineMemOperand to the newly created instruction. The instruction and any auxiliary instructions necessary will be inserted at InsertPt.

Definition at line 1463 of file TargetInstrInfo.h.

getAddrModeFromMemoryOp()

getBranchDestBlock()

getCombinerObjective()

getInstSizeInBytes()

GetInstSize - Return the number of bytes of code the specified instruction may be.

This returns the maximum number of bytes.

Definition at line 100 of file AArch64InstrInfo.cpp.

References assert(), F, llvm::AArch64PAuth::getCheckerSizeInBytes(), llvm::MachineFunction::getFunction(), llvm::MachineFunction::getInfo(), llvm::TargetMachine::getMCAsmInfo(), llvm::PatchPointOpers::getNumPatchBytes(), llvm::StackMapOpers::getNumPatchBytes(), llvm::StatepointOpers::getNumPatchBytes(), llvm::MachineFunction::getSubtarget(), llvm::MachineFunction::getTarget(), isTailCallReturnInst(), MBB, and MI.

getLdStAmountOp()

getLdStBaseOp()

getLdStOffsetOp()

getLoadStoreImmIdx()

getMemOpBaseRegImmOfsOffsetOperand()

getMemOperandsWithOffsetWidth()

getMemOperandWithOffsetWidth()

If OffsetIsScalable is set to 'true', the offset is scaled by vscale.

This is true for some SVE instructions like ldr/str that have a 'reg + imm' addressing mode where the immediate is an index to the scalable vector located at 'reg + imm * vscale x #bytes'.

Definition at line 4294 of file AArch64InstrInfo.cpp.

References assert(), llvm::MachineOperand::getImm(), llvm::details::FixedOrScalableQuantity< LeafTy, ValueTy >::getKnownMinValue(), getMemOpInfo(), llvm::MachineInstr::getNumExplicitOperands(), llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), llvm::MachineOperand::isFI(), llvm::MachineOperand::isImm(), isPostIndexLdStOpcode(), llvm::MachineOperand::isReg(), llvm::details::FixedOrScalableQuantity< LeafTy, ValueTy >::isScalable(), llvm::MachineInstr::mayLoadOrStore(), llvm::Offset, and TRI.

Referenced by areMemAccessesTriviallyDisjoint(), and getMemOperandsWithOffsetWidth().

getMemOpInfo()

bool AArch64InstrInfo::getMemOpInfo ( unsigned Opcode, TypeSize & Scale, TypeSize & Width, int64_t & MinOffset, int64_t & MaxOffset ) static

Returns true if opcode Opc is a memory operation.

If it is, set Scale, Width, MinOffset, and MaxOffset accordingly.

For unscaled instructions, Scale is set to 1. All values are in bytes. MinOffset/MaxOffset are the un-scaled limits of the immediate in the instruction, the actual offset limit is [MinOffset*Scale, MaxOffset*Scale].

Definition at line 4350 of file AArch64InstrInfo.cpp.

References llvm::TypeSize::getFixed(), and llvm::TypeSize::getScalable().

Referenced by emitLoad(), emitStore(), getMemOperandWithOffsetWidth(), and llvm::isAArch64FrameOffsetLegal().

getMemScale() [1/2]

getMemScale() [2/2]

int AArch64InstrInfo::getMemScale ( unsigned Opc) static

getNop()

MCInst AArch64InstrInfo::getNop ( ) const override

getRegisterInfo()

getRegisterInfo - TargetInstrInfo is a superset of MRegister info.

As such, whenever a client has an instance of instruction info, it should always be able to get register info as well (through this method).

Definition at line 190 of file AArch64InstrInfo.h.

Referenced by analyzeLoopForPipelining(), areMemAccessesTriviallyDisjoint(), copyGPRRegTuple(), copyPhysRegTuple(), llvm::AArch64Subtarget::getRegisterInfo(), isCandidateToMergeOrPair(), loadRegFromStackSlot(), optimizeCondBranch(), and storeRegToStackSlot().

getUnscaledLdSt()

std::optional< unsigned > AArch64InstrInfo::getUnscaledLdSt ( unsigned Opc) static

hasBTISemantics()

hasUnscaledLdStOffset() [1/2]

bool llvm::AArch64InstrInfo::hasUnscaledLdStOffset ( MachineInstr & MI) inline static

hasUnscaledLdStOffset() [2/2]

bool AArch64InstrInfo::hasUnscaledLdStOffset ( unsigned Opc) static

insertBranch()

insertIndirectBranch()

Definition at line 322 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addMBB(), llvm::MachineInstrBuilder::addReg(), llvm::MachineInstrBuilder::addSym(), assert(), llvm::BuildMI(), llvm::MBBSectionID::ColdSectionID, llvm::RegState::Define, DL, llvm::MachineBasicBlock::empty(), llvm::MachineBasicBlock::end(), llvm::get(), llvm::AArch64FunctionInfo::hasRedZone(), llvm::isInt(), MBB, llvm::AArch64II::MO_NC, llvm::AArch64II::MO_PAGE, llvm::AArch64II::MO_PAGEOFF, and llvm::report_fatal_error().

insertNoop()

insertSelect()

Definition at line 871 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addReg(), assert(), llvm::BuildMI(), canFoldIntoCSel(), Cond, DL, llvm::AArch64_AM::encodeLogicalImmediate(), llvm::AArch64CC::EQ, llvm::get(), llvm::AArch64_AM::getArithExtendImm(), llvm::AArch64_AM::getExtendType(), llvm::getImm(), llvm::AArch64CC::getInvertedCondCode(), getReg(), I, llvm::AArch64_AM::InvalidShiftExtend, llvm::Register::isVirtual(), llvm_unreachable, MBB, MBBI, MRI, llvm::AArch64CC::NE, Opc, llvm::AArch64_AM::SXTB, llvm::AArch64_AM::SXTH, llvm::AArch64_AM::UXTB, and llvm::AArch64_AM::UXTH.

isAsCheapAsAMove()

isBranchOffsetInRange()

bool AArch64InstrInfo::isBranchOffsetInRange ( unsigned BranchOpc, int64_t BrOffset ) const override

isCandidateToMergeOrPair()

isCoalescableExtInstr()

isCopyInstrImpl()

If the specific machine instruction is an instruction that moves/copies value from one register to another register return destination and source registers as machine operands.

Definition at line 10842 of file AArch64InstrInfo.cpp.

References llvm::getXRegFromWReg(), and MI.

isCopyLikeInstrImpl()

isFpOrNEON() [1/2]

isFpOrNEON() [2/2]

isFPRCopy()

isGPRCopy()

isGPRZero()

isHForm()

isLdStPairSuppressed()

isLoadFromStackSlot()

isLoadFromStackSlotPostFE()

isPairableLdStInst()

isPairedLdSt()

isPreLd()

isPreLdSt()

isPreSt()

isQForm()

isSchedulingBoundary()

isStoreToStackSlot()

isStoreToStackSlotPostFE()

isStridedAccess()

Return true if the given load or store is a strided memory access.

Check all MachineMemOperands for a hint that the load/store is strided.

Definition at line 2696 of file AArch64InstrInfo.cpp.

References llvm::any_of(), and MI.

isSubregFoldable()

bool llvm::AArch64InstrInfo::isSubregFoldable ( ) const inline override

isTailCallReturnInst()

isThroughputPattern()

bool AArch64InstrInfo::isThroughputPattern ( unsigned Pattern) const override

Return true when a code sequence can improve throughput.

It should be called only for instructions in loops.

Parameters

Definition at line 7673 of file AArch64InstrInfo.cpp.

References llvm::FMLAv1i32_indexed_OP1, llvm::FMLAv1i32_indexed_OP2, llvm::FMLAv1i64_indexed_OP1, llvm::FMLAv1i64_indexed_OP2, llvm::FMLAv2f32_OP1, llvm::FMLAv2f32_OP2, llvm::FMLAv2f64_OP1, llvm::FMLAv2f64_OP2, llvm::FMLAv2i32_indexed_OP1, llvm::FMLAv2i32_indexed_OP2, llvm::FMLAv2i64_indexed_OP1, llvm::FMLAv2i64_indexed_OP2, llvm::FMLAv4f16_OP1, llvm::FMLAv4f16_OP2, llvm::FMLAv4f32_OP1, llvm::FMLAv4f32_OP2, llvm::FMLAv4i16_indexed_OP1, llvm::FMLAv4i16_indexed_OP2, llvm::FMLAv4i32_indexed_OP1, llvm::FMLAv4i32_indexed_OP2, llvm::FMLAv8f16_OP1, llvm::FMLAv8f16_OP2, llvm::FMLAv8i16_indexed_OP1, llvm::FMLAv8i16_indexed_OP2, llvm::FMLSv1i32_indexed_OP2, llvm::FMLSv1i64_indexed_OP2, llvm::FMLSv2f32_OP2, llvm::FMLSv2f64_OP2, llvm::FMLSv2i32_indexed_OP2, llvm::FMLSv2i64_indexed_OP2, llvm::FMLSv4f16_OP1, llvm::FMLSv4f16_OP2, llvm::FMLSv4f32_OP2, llvm::FMLSv4i16_indexed_OP1, llvm::FMLSv4i16_indexed_OP2, llvm::FMLSv4i32_indexed_OP2, llvm::FMLSv8f16_OP1, llvm::FMLSv8f16_OP2, llvm::FMLSv8i16_indexed_OP1, llvm::FMLSv8i16_indexed_OP2, llvm::FMULADDD_OP1, llvm::FMULADDD_OP2, llvm::FMULADDH_OP1, llvm::FMULADDH_OP2, llvm::FMULADDS_OP1, llvm::FMULADDS_OP2, llvm::FMULSUBD_OP1, llvm::FMULSUBD_OP2, llvm::FMULSUBH_OP1, llvm::FMULSUBH_OP2, llvm::FMULSUBS_OP1, llvm::FMULSUBS_OP2, llvm::FMULv2i32_indexed_OP1, llvm::FMULv2i32_indexed_OP2, llvm::FMULv2i64_indexed_OP1, llvm::FMULv2i64_indexed_OP2, llvm::FMULv4i16_indexed_OP1, llvm::FMULv4i16_indexed_OP2, llvm::FMULv4i32_indexed_OP1, llvm::FMULv4i32_indexed_OP2, llvm::FMULv8i16_indexed_OP1, llvm::FMULv8i16_indexed_OP2, llvm::FNMULSUBD_OP1, llvm::FNMULSUBH_OP1, llvm::FNMULSUBS_OP1, llvm::MULADDv16i8_OP1, llvm::MULADDv16i8_OP2, llvm::MULADDv2i32_indexed_OP1, llvm::MULADDv2i32_indexed_OP2, llvm::MULADDv2i32_OP1, llvm::MULADDv2i32_OP2, llvm::MULADDv4i16_indexed_OP1, llvm::MULADDv4i16_indexed_OP2, llvm::MULADDv4i16_OP1, llvm::MULADDv4i16_OP2, llvm::MULADDv4i32_indexed_OP1, llvm::MULADDv4i32_indexed_OP2, llvm::MULADDv4i32_OP1, llvm::MULADDv4i32_OP2, 
llvm::MULADDv8i16_indexed_OP1, llvm::MULADDv8i16_indexed_OP2, llvm::MULADDv8i16_OP1, llvm::MULADDv8i16_OP2, llvm::MULADDv8i8_OP1, llvm::MULADDv8i8_OP2, llvm::MULSUBv16i8_OP1, llvm::MULSUBv16i8_OP2, llvm::MULSUBv2i32_indexed_OP1, llvm::MULSUBv2i32_indexed_OP2, llvm::MULSUBv2i32_OP1, llvm::MULSUBv2i32_OP2, llvm::MULSUBv4i16_indexed_OP1, llvm::MULSUBv4i16_indexed_OP2, llvm::MULSUBv4i16_OP1, llvm::MULSUBv4i16_OP2, llvm::MULSUBv4i32_indexed_OP1, llvm::MULSUBv4i32_indexed_OP2, llvm::MULSUBv4i32_OP1, llvm::MULSUBv4i32_OP2, llvm::MULSUBv8i16_indexed_OP1, llvm::MULSUBv8i16_indexed_OP2, llvm::MULSUBv8i16_OP1, llvm::MULSUBv8i16_OP2, llvm::MULSUBv8i8_OP1, and llvm::MULSUBv8i8_OP2.

loadRegFromStackSlot()

Definition at line 6034 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addFrameIndex(), llvm::MachineInstrBuilder::addReg(), assert(), llvm::BuildMI(), llvm::MachineRegisterInfo::constrainRegClass(), llvm::dwarf_linker::DebugLoc, llvm::TargetStackID::Default, llvm::get(), llvm::getDefRegState(), llvm::MachinePointerInfo::getFixedStack(), llvm::MachineFunction::getFrameInfo(), llvm::MachineFunction::getMachineMemOperand(), llvm::MachineFrameInfo::getObjectAlign(), llvm::MachineFrameInfo::getObjectSize(), llvm::MachineFunction::getRegInfo(), getRegisterInfo(), llvm::RegState::Implicit, llvm::Register::isValid(), llvm::Register::isVirtual(), loadRegPairFromStackSlot(), MBB, MBBI, MI, llvm::MachineMemOperand::MOLoad, llvm::MCRegister::NoRegister, llvm::Offset, Opc, llvm::TargetStackID::ScalablePredicateVector, llvm::TargetStackID::ScalableVector, llvm::MachineFrameInfo::setStackID(), and TRI.

Referenced by foldMemoryOperandImpl().

optimizeCompareInstr()

optimizeCompareInstr - Convert the instruction supplying the argument to the comparison into one that sets the zero bit in the flags register.

Try to optimize a compare instruction.

A compare instruction is an instruction which produces AArch64::NZCV. It can be truly compare instruction when there are no uses of its destination register.

The following steps are tried in order:

  1. Convert CmpInstr into an unconditional version.
  2. Remove CmpInstr if above there is an instruction producing a needed condition code or an instruction which can be converted into such an instruction. Only comparison with zero is supported.

Definition at line 1846 of file AArch64InstrInfo.cpp.

References assert(), convertToNonFlagSettingOpc(), llvm::MachineInstr::definesRegister(), llvm::MachineInstr::eraseFromParent(), llvm::MachineInstr::findRegisterDefOperandIdx(), llvm::get(), llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), llvm::MachineInstr::getParent(), llvm::MachineOperand::getReg(), MRI, Opc, llvm::MachineInstr::removeOperand(), llvm::MachineInstr::setDesc(), llvm::succeeded(), and UpdateOperandRegClass().

optimizeCondBranch()

Replace csincr-branch sequence by simple conditional branch.

Examples:

  1. csinc w9, wzr, wzr, <condition code>
    tbnz w9, #0, 0x44
    to
    b.<inverted condition code>
  2. csinc w9, wzr, wzr, <condition code>
    tbz w9, #0, 0x44
    to
    b.<condition code>
Replace compare and branch sequence by TBZ/TBNZ instruction when the compare's constant operand is power of 2.

Examples:

and w8, w8, #0x400

cbnz w8, L1

to

tbnz w8, #10, L1

Parameters

Returns

True when the simple conditional branch is generated

Definition at line 9491 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addMBB(), llvm::MachineInstrBuilder::addReg(), AK_Write, areCFlagsAccessedBetweenInstrs(), assert(), llvm::BuildMI(), llvm::AArch64_AM::decodeLogicalImmediate(), DefMI, DL, llvm::get(), llvm::AArch64CC::getInvertedCondCode(), llvm::MachineInstr::getOperand(), llvm::MachineOperand::getReg(), llvm::MachineFunction::getRegInfo(), getRegisterInfo(), llvm::isPowerOf2_64(), llvm::Register::isVirtual(), llvm_unreachable, llvm::Log2_64(), MBB, MI, MRI, Opc, llvm::MachineOperand::setIsKill(), llvm::MachineOperand::setSubReg(), and TBB.

probedStackAlloc()

Return true when there is potentially a faster code sequence for an instruction chain ending in Root.

All potential patterns are / listed in the Patterns array. bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl &Patterns, bool DoRegPressureReduce) const override; / Return true when Inst is associative and commutative so that it can be / reassociated. If Invert is true, then the inverse of Inst operation must / be checked. bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override;

/ Returns true if \P Opcode is an instruction which performs accumulation / into a destination register. bool isAccumulationOpcode(unsigned Opcode) const override;

/ Returns an opcode which defines the accumulator used by \P Opcode. unsigned getAccumulationStartOpcode(unsigned Opcode) const override;

unsigned getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const override;

/ When getMachineCombinerPatterns() finds patterns, this function / generates the instructions that could replace the original code / sequence void genAlternativeCodeSequence( MachineInstr &Root, unsigned Pattern, SmallVectorImpl<MachineInstr *> &InsInstrs, SmallVectorImpl<MachineInstr *> &DelInstrs, DenseMap<Register, unsigned> &InstrIdxForVirtReg) const override; / AArch64 supports MachineCombiner. bool useMachineCombiner() const override;

bool expandPostRAPseudo(MachineInstr &MI) const override;

std::pair<unsigned, unsigned> decomposeMachineOperandsTargetFlags(unsigned TF) const override; ArrayRef<std::pair<unsigned, const char *>> getSerializableDirectMachineOperandTargetFlags() const override; ArrayRef<std::pair<unsigned, const char *>> getSerializableBitmaskMachineOperandTargetFlags() const override; ArrayRef<std::pair<MachineMemOperand::Flags, const char *>> getSerializableMachineMemOperandTargetFlags() const override;

bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override; std::optional<std::unique_ptr<outliner::OutlinedFunction>> getOutliningCandidateInfo( const MachineModuleInfo &MMI, std::vector<outliner::Candidate> &RepeatedSequenceLocs, unsigned MinRepeats) const override; void mergeOutliningCandidateAttributes( Function &F, std::vector<outliner::Candidate> &Candidates) const override; outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override; SmallVector< std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>> getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const override; void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override; MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override; bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;

void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects = true) const override;

/// Returns the vector element size (B, H, S or D) of an SVE opcode.
uint64_t getElementSizeForOpcode(unsigned Opc) const;

/// Returns true if the opcode is for an SVE instruction that sets the
/// condition codes as if its results had been fed to a PTEST instruction
/// along with the same general predicate.
bool isPTestLikeOpcode(unsigned Opc) const;

/// Returns true if the opcode is for an SVE WHILE## instruction.
bool isWhileOpcode(unsigned Opc) const;

/// Returns true if the instruction has a shift by immediate that can be
/// executed in one cycle less.
static bool isFalkorShiftExtFast(const MachineInstr &MI);

/// Return true if the instructions is a SEH instruction used for unwinding
/// on Windows.
static bool isSEHInstruction(const MachineInstr &MI);

std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI, Register Reg) const override;

bool isFunctionSafeToSplit(const MachineFunction &MF) const override;

bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const override;

std::optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

unsigned int getTailDuplicateSize(CodeGenOptLevel OptLevel) const override;

bool isExtendLikelyToBeFolded(MachineInstr &ExtMI, MachineRegisterInfo &MRI) const override;

static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset, int64_t &NumBytes, int64_t &NumPredicateVectors, int64_t &NumDataVectors); static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset, int64_t &ByteSized, int64_t &VGSized);

/// Return true if address of the form BaseReg + Scale * ScaledReg + Offset
/// can be used for a load/store of NumBytes. BaseReg is always present and
/// implicit.
bool isLegalAddressingMode(unsigned NumBytes, int64_t Offset, unsigned Scale) const;

Decrement the SP, issuing probes along the way. TargetReg is the new top of the stack. FrameSetup is passed as true if the allocation is a part

Definition at line 11101 of file AArch64InstrInfo.cpp.

References AArch64InstrInfo(), llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addMBB(), llvm::MachineInstrBuilder::addReg(), llvm::MachineBasicBlock::addSuccessor(), assert(), llvm::MachineBasicBlock::begin(), llvm::BuildMI(), llvm::MachineFunction::CreateMachineBasicBlock(), llvm::RegState::Define, DL, llvm::emitFrameOffset(), llvm::MachineBasicBlock::end(), llvm::MachineInstr::FrameSetup, llvm::fullyRecomputeLiveIns(), llvm::AArch64_AM::getArithExtendImm(), llvm::StackOffset::getFixed(), llvm::MachineFunction::getInfo(), llvm::MachineFunction::getRegInfo(), llvm::AArch64_AM::getShifterImm(), llvm::MachineFunction::getSubtarget(), llvm::MachineFunction::insert(), llvm::AArch64CC::LE, llvm::AArch64_AM::LSL, MBB, MBBI, llvm::MachineInstr::NoFlags, llvm::MachineRegisterInfo::reservedRegsFrozen(), llvm::MachineInstrBuilder::setMIFlags(), llvm::MachineBasicBlock::splice(), TII, llvm::MachineBasicBlock::transferSuccessorsAndUpdatePHIs(), and llvm::AArch64_AM::UXTX.

removeBranch()

reverseBranchCondition()

shouldClusterMemOps()

Detect opportunities for ldp/stp formation.

Only called for LdSt for which getMemOperandWithOffset returns true.

Definition at line 5193 of file AArch64InstrInfo.cpp.

References assert(), canPairLdStOpc(), llvm::ArrayRef< T >::front(), llvm::MachineFunction::getFrameInfo(), llvm::MachineOperand::getImm(), llvm::MachineOperand::getIndex(), llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), llvm::MachineBasicBlock::getParent(), llvm::MachineInstr::getParent(), llvm::MachineOperand::getParent(), llvm::MachineOperand::getReg(), llvm::MachineOperand::getType(), hasUnscaledLdStOffset(), isCandidateToMergeOrPair(), llvm::MachineOperand::isFI(), llvm::MachineOperand::isIdenticalTo(), isPairableLdStInst(), llvm::MachineOperand::isReg(), scaleOffset(), shouldClusterFI(), and llvm::ArrayRef< T >::size().

storeRegToStackSlot()

Definition at line 5854 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addFrameIndex(), llvm::MachineInstrBuilder::addReg(), assert(), llvm::BuildMI(), llvm::MachineRegisterInfo::constrainRegClass(), llvm::dwarf_linker::DebugLoc, llvm::TargetStackID::Default, llvm::get(), llvm::MachinePointerInfo::getFixedStack(), llvm::MachineFunction::getFrameInfo(), llvm::getKillRegState(), llvm::MachineFunction::getMachineMemOperand(), llvm::MachineFrameInfo::getObjectAlign(), llvm::MachineFrameInfo::getObjectSize(), llvm::MachineFunction::getRegInfo(), getRegisterInfo(), llvm::RegState::Implicit, llvm::MCRegister::isValid(), llvm::Register::isVirtual(), MBB, MBBI, MI, llvm::MachineMemOperand::MOStore, llvm::MCRegister::NoRegister, llvm::Offset, Opc, llvm::TargetStackID::ScalablePredicateVector, llvm::TargetStackID::ScalableVector, llvm::MachineFrameInfo::setStackID(), and storeRegPairToStackSlot().

Referenced by foldMemoryOperandImpl().

suppressLdStPair()

static void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI)

The documentation for this class was generated from the following files: