#include "AArch64GenInstrInfo.inc"">

LLVM: lib/Target/AArch64/AArch64InstrInfo.h File Reference

#include "[AArch64.h](AArch64%5F8h%5Fsource.html)"
#include "[AArch64RegisterInfo.h](AArch64RegisterInfo%5F8h%5Fsource.html)"
#include "[llvm/CodeGen/TargetInstrInfo.h](TargetInstrInfo%5F8h%5Fsource.html)"
#include "[llvm/Support/TypeSize.h](TypeSize%5F8h%5Fsource.html)"
#include <optional>
#include "AArch64GenInstrInfo.inc"

Go to the source code of this file.

Namespaces
namespace llvm
namespace llvm::AArch64
Macros
#define GET_INSTRINFO_HEADER
#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
#define GET_INSTRINFO_HELPER_DECLS
#define TSFLAG_ELEMENT_SIZE_TYPE(X)
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X)
#define TSFLAG_FALSE_LANE_TYPE(X)
#define TSFLAG_INSTR_FLAGS(X)
#define TSFLAG_SME_MATRIX_TYPE(X)
Enumerations
enum llvm::AArch64MachineCombinerPattern : unsigned { llvm::SUBADD_OP1 = MachineCombinerPattern::TARGET_PATTERN_START , llvm::SUBADD_OP2, llvm::MULADDW_OP1, llvm::MULADDW_OP2, llvm::MULSUBW_OP1, llvm::MULSUBW_OP2, llvm::MULADDWI_OP1, llvm::MULSUBWI_OP1, llvm::MULADDX_OP1, llvm::MULADDX_OP2, llvm::MULSUBX_OP1, llvm::MULSUBX_OP2, llvm::MULADDXI_OP1, llvm::MULSUBXI_OP1, llvm::MULADDv8i8_OP1, llvm::MULADDv8i8_OP2, llvm::MULADDv16i8_OP1, llvm::MULADDv16i8_OP2, llvm::MULADDv4i16_OP1, llvm::MULADDv4i16_OP2, llvm::MULADDv8i16_OP1, llvm::MULADDv8i16_OP2, llvm::MULADDv2i32_OP1, llvm::MULADDv2i32_OP2, llvm::MULADDv4i32_OP1, llvm::MULADDv4i32_OP2, llvm::MULSUBv8i8_OP1, llvm::MULSUBv8i8_OP2, llvm::MULSUBv16i8_OP1, llvm::MULSUBv16i8_OP2, llvm::MULSUBv4i16_OP1, llvm::MULSUBv4i16_OP2, llvm::MULSUBv8i16_OP1, llvm::MULSUBv8i16_OP2, llvm::MULSUBv2i32_OP1, llvm::MULSUBv2i32_OP2, llvm::MULSUBv4i32_OP1, llvm::MULSUBv4i32_OP2, llvm::MULADDv4i16_indexed_OP1, llvm::MULADDv4i16_indexed_OP2, llvm::MULADDv8i16_indexed_OP1, llvm::MULADDv8i16_indexed_OP2, llvm::MULADDv2i32_indexed_OP1, llvm::MULADDv2i32_indexed_OP2, llvm::MULADDv4i32_indexed_OP1, llvm::MULADDv4i32_indexed_OP2, llvm::MULSUBv4i16_indexed_OP1, llvm::MULSUBv4i16_indexed_OP2, llvm::MULSUBv8i16_indexed_OP1, llvm::MULSUBv8i16_indexed_OP2, llvm::MULSUBv2i32_indexed_OP1, llvm::MULSUBv2i32_indexed_OP2, llvm::MULSUBv4i32_indexed_OP1, llvm::MULSUBv4i32_indexed_OP2, llvm::FMULADDH_OP1, llvm::FMULADDH_OP2, llvm::FMULSUBH_OP1, llvm::FMULSUBH_OP2, llvm::FMULADDS_OP1, llvm::FMULADDS_OP2, llvm::FMULSUBS_OP1, llvm::FMULSUBS_OP2, llvm::FMULADDD_OP1, llvm::FMULADDD_OP2, llvm::FMULSUBD_OP1, llvm::FMULSUBD_OP2, llvm::FNMULSUBH_OP1, llvm::FNMULSUBS_OP1, llvm::FNMULSUBD_OP1, llvm::FMLAv1i32_indexed_OP1, llvm::FMLAv1i32_indexed_OP2, llvm::FMLAv1i64_indexed_OP1, llvm::FMLAv1i64_indexed_OP2, llvm::FMLAv4f16_OP1, llvm::FMLAv4f16_OP2, llvm::FMLAv8f16_OP1, llvm::FMLAv8f16_OP2, llvm::FMLAv2f32_OP2, llvm::FMLAv2f32_OP1, llvm::FMLAv2f64_OP1, llvm::FMLAv2f64_OP2, llvm::FMLAv4i16_indexed_OP1, llvm::FMLAv4i16_indexed_OP2, llvm::FMLAv8i16_indexed_OP1, llvm::FMLAv8i16_indexed_OP2, llvm::FMLAv2i32_indexed_OP1, llvm::FMLAv2i32_indexed_OP2, llvm::FMLAv2i64_indexed_OP1, llvm::FMLAv2i64_indexed_OP2, llvm::FMLAv4f32_OP1, llvm::FMLAv4f32_OP2, llvm::FMLAv4i32_indexed_OP1, llvm::FMLAv4i32_indexed_OP2, llvm::FMLSv1i32_indexed_OP2, llvm::FMLSv1i64_indexed_OP2, llvm::FMLSv4f16_OP1, llvm::FMLSv4f16_OP2, llvm::FMLSv8f16_OP1, llvm::FMLSv8f16_OP2, llvm::FMLSv2f32_OP1, llvm::FMLSv2f32_OP2, llvm::FMLSv2f64_OP1, llvm::FMLSv2f64_OP2, llvm::FMLSv4i16_indexed_OP1, llvm::FMLSv4i16_indexed_OP2, llvm::FMLSv8i16_indexed_OP1, llvm::FMLSv8i16_indexed_OP2, llvm::FMLSv2i32_indexed_OP1, llvm::FMLSv2i32_indexed_OP2, llvm::FMLSv2i64_indexed_OP1, llvm::FMLSv2i64_indexed_OP2, llvm::FMLSv4f32_OP1, llvm::FMLSv4f32_OP2, llvm::FMLSv4i32_indexed_OP1, llvm::FMLSv4i32_indexed_OP2, llvm::FMULv2i32_indexed_OP1, llvm::FMULv2i32_indexed_OP2, llvm::FMULv2i64_indexed_OP1, llvm::FMULv2i64_indexed_OP2, llvm::FMULv4i16_indexed_OP1, llvm::FMULv4i16_indexed_OP2, llvm::FMULv4i32_indexed_OP1, llvm::FMULv4i32_indexed_OP2, llvm::FMULv8i16_indexed_OP1, llvm::FMULv8i16_indexed_OP2, llvm::FNMADD, llvm::GATHER_LANE_i32, llvm::GATHER_LANE_i16, llvm::GATHER_LANE_i8 }
enum llvm::AArch64FrameOffsetStatus { llvm::AArch64FrameOffsetCannotUpdate = 0x0 , llvm::AArch64FrameOffsetIsLegal = 0x1 , llvm::AArch64FrameOffsetCanUpdate = 0x2 }
Used to report the frame offset status in isAArch64FrameOffsetLegal; a usage sketch follows this enumeration list.
enum llvm::AArch64::ElementSizeType { llvm::AArch64::ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7) , llvm::AArch64::ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0) , llvm::AArch64::ElementSizeB = TSFLAG_ELEMENT_SIZE_TYPE(0x1) , llvm::AArch64::ElementSizeH = TSFLAG_ELEMENT_SIZE_TYPE(0x2) , llvm::AArch64::ElementSizeS = TSFLAG_ELEMENT_SIZE_TYPE(0x3) , llvm::AArch64::ElementSizeD = TSFLAG_ELEMENT_SIZE_TYPE(0x4) }
enum llvm::AArch64::DestructiveInstType { llvm::AArch64::DestructiveInstTypeMask = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf) , llvm::AArch64::NotDestructive = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0) , llvm::AArch64::DestructiveOther = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1) , llvm::AArch64::DestructiveUnary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2) , llvm::AArch64::DestructiveBinaryImm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3) , llvm::AArch64::DestructiveBinaryShImmUnpred = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4) , llvm::AArch64::DestructiveBinary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5) , llvm::AArch64::DestructiveBinaryComm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6) , llvm::AArch64::DestructiveBinaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7) , llvm::AArch64::DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8) , llvm::AArch64::Destructive2xRegImmUnpred = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9) , llvm::AArch64::DestructiveUnaryPassthru = TSFLAG_DESTRUCTIVE_INST_TYPE(0xa) }
enum llvm::AArch64::FalseLaneType { llvm::AArch64::FalseLanesMask = TSFLAG_FALSE_LANE_TYPE(0x3) , llvm::AArch64::FalseLanesZero = TSFLAG_FALSE_LANE_TYPE(0x1) , llvm::AArch64::FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2) }
enum llvm::AArch64::SMEMatrixType { llvm::AArch64::SMEMatrixTypeMask = TSFLAG_SME_MATRIX_TYPE(0x7) , llvm::AArch64::SMEMatrixNone = TSFLAG_SME_MATRIX_TYPE(0x0) , llvm::AArch64::SMEMatrixTileB = TSFLAG_SME_MATRIX_TYPE(0x1) , llvm::AArch64::SMEMatrixTileH = TSFLAG_SME_MATRIX_TYPE(0x2) , llvm::AArch64::SMEMatrixTileS = TSFLAG_SME_MATRIX_TYPE(0x3) , llvm::AArch64::SMEMatrixTileD = TSFLAG_SME_MATRIX_TYPE(0x4) , llvm::AArch64::SMEMatrixTileQ = TSFLAG_SME_MATRIX_TYPE(0x5) , llvm::AArch64::SMEMatrixArray = TSFLAG_SME_MATRIX_TYPE(0x6) }
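
Because the AArch64FrameOffsetStatus values are bit flags, callers test the result of isAArch64FrameOffsetLegal with bitwise AND rather than equality. A minimal sketch of such a check (the helper name `canFoldOffsetInPlace` and the caller-supplied `MI` and `Offset` are illustrative assumptions, not part of this header):

```cpp
#include "AArch64InstrInfo.h"

// Hypothetical helper: returns true when MI can be rewritten in place and the
// whole Offset fits the instruction's immediate field. MI and Offset are
// supplied by the surrounding frame-lowering code.
static bool canFoldOffsetInPlace(const llvm::MachineInstr &MI,
                                 llvm::StackOffset &Offset) {
  int Status = llvm::isAArch64FrameOffsetLegal(MI, Offset);
  // AArch64FrameOffsetCanUpdate: MI may be rewritten (e.g. by
  // rewriteAArch64FrameIndex). AArch64FrameOffsetIsLegal: the offset fits the
  // immediate field, so no remainder is left for a separate add/sub.
  return (Status & llvm::AArch64FrameOffsetCanUpdate) &&
         (Status & llvm::AArch64FrameOffsetIsLegal);
}
```
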
Functions
std::optional< UsedNZCV > llvm::examineCFlagsUse (MachineInstr &MI, MachineInstr &CmpInstr, const TargetRegisterInfo &TRI, SmallVectorImpl< MachineInstr * > *CCUseInstrs=nullptr)
bool llvm::isNZCVTouchedInInstructionRange (const MachineInstr &DefMI, const MachineInstr &UseMI, const TargetRegisterInfo *TRI)
Return true if there is an instruction /after/ DefMI and before UseMI which either reads or clobbers NZCV.
MCCFIInstruction llvm::createDefCFA (const TargetRegisterInfo &TRI, unsigned FrameReg, unsigned Reg, const StackOffset &Offset, bool LastAdjustmentWasScalable=true)
MCCFIInstruction llvm::createCFAOffset (const TargetRegisterInfo &MRI, unsigned Reg, const StackOffset &OffsetFromDefCFA, std::optional< int64_t > IncomingVGOffsetFromDefCFA)
void llvm::emitFrameOffset (MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset (a usage sketch follows this function list).
bool llvm::rewriteAArch64FrameIndex (MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
int llvm::isAArch64FrameOffsetLegal (const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
static bool llvm::isUncondBranchOpcode (int Opc)
static bool llvm::isCondBranchOpcode (int Opc)
static bool llvm::isIndirectBranchOpcode (int Opc)
static bool llvm::isIndirectCallOpcode (unsigned Opc)
static bool llvm::isPTrueOpcode (unsigned Opc)
unsigned llvm::getBLRCallOpcode (const MachineFunction &MF)
Return opcode to be used for indirect calls.
static unsigned llvm::getXPACOpcodeForKey (AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
static unsigned llvm::getAUTOpcodeForKey (AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn't take a discriminator operand, using zero instead.
static unsigned llvm::getPACOpcodeForKey (AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn't take a discriminator operand, using zero instead.
int llvm::AArch64::getSVEPseudoMap (uint16_t Opcode)
int llvm::AArch64::getSVERevInstr (uint16_t Opcode)
int llvm::AArch64::getSVENonRevInstr (uint16_t Opcode)
int llvm::AArch64::getSMEPseudoMap (uint16_t Opcode)
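
As noted in the emitFrameOffset entry above, here is a minimal usage sketch. The wrapper function, the X9 scratch register, the fixed 16-byte offset, and the FrameSetup flag are illustrative assumptions; MBB, MBBI, DL, and TII would come from the calling pass.

```cpp
#include "AArch64InstrInfo.h"

// Hypothetical helper: emit whatever ADD/SUB sequence is needed so that
// X9 = SP + 16 immediately before MBBI.
static void materializeSPPlus16(llvm::MachineBasicBlock &MBB,
                                llvm::MachineBasicBlock::iterator MBBI,
                                const llvm::DebugLoc &DL,
                                const llvm::TargetInstrInfo *TII) {
  llvm::emitFrameOffset(MBB, MBBI, DL, llvm::AArch64::X9, llvm::AArch64::SP,
                        llvm::StackOffset::getFixed(16), TII,
                        llvm::MachineInstr::FrameSetup);
}
```
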

FALKOR_STRIDED_ACCESS_MD

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"

GET_INSTRINFO_HEADER

#define GET_INSTRINFO_HEADER

GET_INSTRINFO_HELPER_DECLS

#define GET_INSTRINFO_HELPER_DECLS

TSFLAG_DESTRUCTIVE_INST_TYPE

#define TSFLAG_DESTRUCTIVE_INST_TYPE ( X )

Value:

((X) << 3)

Definition at line 811 of file AArch64InstrInfo.h.
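
These TSFLAG_* macros only describe how each field is packed into MCInstrDesc::TSFlags; passes read a field back by masking with the corresponding *Mask enumerator. A minimal sketch (the helper name and its use of an existing MachineInstr are illustrative assumptions, not part of this header):

```cpp
#include "AArch64InstrInfo.h"

#include <cstdint>

// Hypothetical helper: true if MI is an SVE destructive instruction that has
// a reversed form usable after commuting its operands.
static bool hasReversedDestructiveForm(const llvm::MachineInstr &MI) {
  uint64_t TSFlags = MI.getDesc().TSFlags;
  // DestructiveInstTypeMask is TSFLAG_DESTRUCTIVE_INST_TYPE(0xf), i.e. four
  // bits starting at bit 3. The enumerators already include the shift, so the
  // masked value compares directly against them.
  uint64_t DType = TSFlags & llvm::AArch64::DestructiveInstTypeMask;
  return DType == llvm::AArch64::DestructiveBinaryCommWithRev ||
         DType == llvm::AArch64::DestructiveTernaryCommWithRev;
}
```
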

TSFLAG_ELEMENT_SIZE_TYPE

#define TSFLAG_ELEMENT_SIZE_TYPE ( X )

TSFLAG_FALSE_LANE_TYPE

#define TSFLAG_FALSE_LANE_TYPE ( X )

TSFLAG_INSTR_FLAGS

#define TSFLAG_INSTR_FLAGS ( X )

TSFLAG_SME_MATRIX_TYPE

#define TSFLAG_SME_MATRIX_TYPE ( X )