LLVM: include/llvm/TargetParser/RISCVTargetParser.h Source File

#ifndef LLVM_TARGETPARSER_RISCVTARGETPARSER_H
#define LLVM_TARGETPARSER_RISCVTARGETPARSER_H

// ... (license header and #includes elided) ...

namespace llvm {

class Triple;

namespace RISCV {

namespace RISCVExtensionBitmaskTable {
// ... (extension bitmask table entry struct elided) ...
} // namespace RISCVExtensionBitmaskTable

// ... (CPU model and per-CPU table entry structs elided) ...

// ... (CPU table query declarations elided; they are listed later on this page) ...
void getFeaturesForCPU(StringRef CPU,
                       SmallVectorImpl<std::string> &EnabledFeatures,
                       bool NeedPlus = false);

} // namespace RISCV

namespace RISCVII {

// ... (VLMUL encoding enum elided) ...

enum {
  // ... (vtype tail/mask policy values elided) ...
};

} // namespace RISCVII

namespace RISCVVType {

inline static bool isValidSEW(unsigned SEW) {
  return isPowerOf2_32(SEW) && SEW >= 8 && SEW <= 64;
}

inline static bool isValidLMUL(unsigned LMUL, bool Fractional) {
  return isPowerOf2_32(LMUL) && LMUL <= 8 && (!Fractional || LMUL != 1);
}

unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
                     bool MaskAgnostic);

inline static RISCVII::VLMUL getVLMUL(unsigned VType) {
  unsigned VLMUL = VType & 0x7;
  return static_cast<RISCVII::VLMUL>(VLMUL);
}

std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL);

inline static RISCVII::VLMUL encodeLMUL(unsigned LMUL, bool Fractional) {
  unsigned LmulLog2 = Log2_32(LMUL);
  return static_cast<RISCVII::VLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);
}

inline static unsigned decodeVSEW(unsigned VSEW) {
  assert(VSEW < 8 && "Unexpected VSEW value");
  return 1 << (VSEW + 3);
}

inline static unsigned encodeSEW(unsigned SEW) {
  return Log2_32(SEW) - 3;
}

inline static unsigned getSEW(unsigned VType) {
  unsigned VSEW = (VType >> 3) & 0x7;
  return decodeVSEW(VSEW);
}

inline static bool isTailAgnostic(unsigned VType) { return VType & 0x40; }

inline static bool isMaskAgnostic(unsigned VType) { return VType & 0x80; }

void printVType(unsigned VType, raw_ostream &OS);

unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul);

std::optional<RISCVII::VLMUL>
getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW);

} // namespace RISCVVType

} // namespace llvm

#endif
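The RISCVVType helpers above are defined inline, so they can be exercised without linking any LLVM libraries. Below is a minimal sketch, assuming only that an LLVM source/build tree is on the include path; the hand-assembled vtype word simply mirrors the bit layout the getters decode (vlmul in bits [2:0], vsew in bits [5:3], tail-agnostic at 0x40, mask-agnostic at 0x80), whereas real callers would use the out-of-line encodeVTYPE.

// Minimal sketch: exercising the inline vtype helpers from this header.
// Assumes an LLVM tree on the include path; only inline functions are used,
// so no LLVM libraries need to be linked.
#include "llvm/TargetParser/RISCVTargetParser.h"
#include <cassert>

using namespace llvm;

int main() {
  // SEW must be a power of two in [8, 64]; LMUL a power of two <= 8.
  assert(RISCVVType::isValidSEW(32));
  assert(RISCVVType::isValidLMUL(4, /*Fractional=*/false));

  // SEW <-> vsew field round trip: encodeSEW(32) == 2, decodeVSEW(2) == 32.
  unsigned VSEW = RISCVVType::encodeSEW(32);
  assert(RISCVVType::decodeVSEW(VSEW) == 32);

  // Hand-assemble a vtype word using the layout the getters decode:
  // vlmul in bits [2:0], vsew in bits [5:3], vta at 0x40, vma at 0x80.
  unsigned VType = static_cast<unsigned>(
                       RISCVVType::encodeLMUL(4, /*Fractional=*/false)) |
                   (VSEW << 3) | 0x40;
  assert(RISCVVType::getSEW(VType) == 32);
  assert(RISCVVType::isTailAgnostic(VType));
  assert(!RISCVVType::isMaskAgnostic(VType));
  return 0;
}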


Referenced classes:

  llvm::SmallVectorImpl: common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
  llvm::StringLiteral: a wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs.
  llvm::StringRef: a constant reference to a string, i.e. a character array and a length, which need not be null terminated; StringRef::starts_with(Prefix) checks whether the string starts with the given Prefix.
  llvm::raw_ostream: an extremely fast bulk output stream that can only output to a stream.

Members documented on this page:

llvm::RISCVVType (vtype encoding helpers):

  static bool isValidSEW(unsigned SEW)
  static bool isValidLMUL(unsigned LMUL, bool Fractional)
  unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
  static RISCVII::VLMUL getVLMUL(unsigned VType)
  static RISCVII::VLMUL encodeLMUL(unsigned LMUL, bool Fractional)
  std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL)
  static unsigned encodeSEW(unsigned SEW)
  static unsigned decodeVSEW(unsigned VSEW)
  static unsigned getSEW(unsigned VType)
  static bool isTailAgnostic(unsigned VType)
  static bool isMaskAgnostic(unsigned VType)
  unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul)
  std::optional<RISCVII::VLMUL> getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW)
  void printVType(unsigned VType, raw_ostream &OS)

llvm::RISCVII:

  TAIL_UNDISTURBED_MASK_UNDISTURBED (vtype tail/mask policy enumerator)

llvm::RISCV (CPU table queries):

  bool parseCPU(StringRef CPU, bool IsRV64)
  bool parseTuneCPU(StringRef CPU, bool IsRV64)
  bool hasValidCPUModel(StringRef CPU)
  CPUModel getCPUModel(StringRef CPU)
  StringRef getMArchFromMcpu(StringRef CPU)
  void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64)
  void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64)
  bool hasFastScalarUnalignedAccess(StringRef CPU)
  bool hasFastVectorUnalignedAccess(StringRef CPU)
  void getFeaturesForCPU(StringRef CPU, SmallVectorImpl<std::string> &EnabledFeatures, bool NeedPlus = false)
  static constexpr unsigned RVVBitsPerBlock
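The CPU table queries just listed are defined out of line, so using them requires linking against LLVM (for example the LLVMTargetParser and LLVMSupport components). The following is a minimal sketch; the CPU name "sifive-u74" and the reading of NeedPlus as "prefix each feature with '+'" are assumptions for illustration, not taken from this page.

// Minimal sketch of the CPU query API; requires linking against LLVM.
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
#include <string>

using namespace llvm;

static void describeCPU(StringRef CPU, bool IsRV64) {
  // parseCPU reports whether the name exists in the RISC-V CPU table.
  if (!RISCV::parseCPU(CPU, IsRV64)) {
    errs() << "unknown RISC-V CPU: " << CPU << "\n";
    return;
  }
  // Default -march string recorded for this CPU in the table.
  outs() << CPU << ": default march " << RISCV::getMArchFromMcpu(CPU) << "\n";
  outs() << "  fast unaligned scalar access: "
         << RISCV::hasFastScalarUnalignedAccess(CPU) << "\n";

  // Expand the CPU into its enabled ISA features.
  SmallVector<std::string> Features;
  RISCV::getFeaturesForCPU(CPU, Features, /*NeedPlus=*/true);
  for (const std::string &F : Features)
    outs() << "  " << F << "\n";
}

int main() {
  describeCPU("sifive-u74", /*IsRV64=*/true); // assumed-valid CPU name
  return 0;
}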


Referenced support helpers (llvm/Support/MathExtras.h):

  unsigned Log2_32(uint32_t Value): return the floor log base 2 of the specified value, -1 if the value is zero.
  constexpr bool isPowerOf2_32(uint32_t Value): return true if the argument is a power of two > 0.
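These two helpers carry the SEW arithmetic used by the inline functions in the listing: decodeVSEW computes 1 << (VSEW + 3), and the reverse direction is Log2_32(SEW) - 3. A minimal check of that round trip, assuming only llvm/Support/MathExtras.h on the include path; the standalone arithmetic below is illustrative, not a substitute for encodeSEW/decodeVSEW.

// Quick check of the SEW <-> vsew-field arithmetic built on these helpers.
#include "llvm/Support/MathExtras.h"
#include <cassert>

int main() {
  const unsigned SEWs[] = {8, 16, 32, 64};
  for (unsigned SEW : SEWs) {
    assert(llvm::isPowerOf2_32(SEW));       // every legal SEW is a power of two
    unsigned VSEW = llvm::Log2_32(SEW) - 3; // the encodeSEW direction
    assert((1u << (VSEW + 3)) == SEW);      // the decodeVSEW direction
  }
  return 0;
}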

Referenced data members:

  StringLiteral DefaultMarch
  bool FastVectorUnalignedAccess
  bool FastScalarUnalignedAccess