LLVM: lib/IR/Operator.cpp Source File

//===-- Operator.cpp - Implement the LLVM operators -----------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

#include "ConstantsContext.h"

namespace llvm {
bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::Trunc: {
    if (auto *TI = dyn_cast<TruncInst>(this))
      return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
    return false;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::Or:
    return cast<PossiblyDisjointInst>(this)->isDisjoint();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange exists on constant expressions only.
    return GEP->getNoWrapFlags() != GEPNoWrapFlags::none() ||
           GEP->getInRange() != std::nullopt;
  }
  case Instruction::UIToFP:
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  case Instruction::ICmp:
    return cast<ICmpInst>(this)->hasSameSign();
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}

bool Operator::hasPoisonGeneratingAnnotations() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && (I->hasPoisonGeneratingReturnAttributes() ||
               I->hasPoisonGeneratingMetadata());
}

Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

std::optional<ConstantRange> GEPOperator::getInRange() const {
  if (auto *CE = dyn_cast<GetElementPtrConstantExpr>(this))
    return CE->getInRange();
  return std::nullopt;
}

Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset at every level of the GEP and
  // accumulate the minimum alignment into Result.
  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, use 1: it is the index value that gives the
      // worst alignment for the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}

bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}

bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  // Fast path for the canonical "getelementptr i8" form.
  if (SourceType->isIntegerTy(8) && !Index.empty() && !ExternalAnalysis) {
    auto *CI = dyn_cast<ConstantInt>(Index.front());
    if (CI && CI->getType()->isIntegerTy()) {
      Offset += CI->getValue().sextOrTrunc(Offset.getBitWidth());
      return true;
    }
    return false;
  }

  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    APInt IndexedSize(Offset.getBitWidth(), Size, /*isSigned=*/false,
                      /*implicitTrunc=*/true);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // The external analysis can return a value higher or lower than the one
      // the index represents, so detect signed overflow and underflow.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle a constant index directly if possible.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero
      // (vscale * n * 0 == 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL)))
        return false;
      continue;
    }

    // The operand is not constant; check whether an external analysis was
    // provided. External analysis is not applicable to struct or scalable
    // types.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(DL)))
      return false;
  }
  return true;
}
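// Illustrative sketch (added for this page, not part of upstream Operator.cpp):
// the ExternalAnalysis hook above lets a caller fold GEP indices that are not
// ConstantInts but whose values it knows from some other analysis. The helper,
// the KnownIndexValues map, and their names are hypothetical; llvm/ADT/DenseMap.h
// is assumed to be available.
static bool foldOffsetWithKnownIndices(
    const GEPOperator *GEP, const DataLayout &DL,
    const DenseMap<const Value *, APInt> &KnownIndexValues, APInt &Offset) {
  Offset = APInt(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
  // Report a known value for an index, or refuse so the accumulation bails out.
  auto ExternalAnalysis = [&](Value &V, APInt &Idx) -> bool {
    auto It = KnownIndexValues.find(&V);
    if (It == KnownIndexValues.end())
      return false;
    Idx = It->second;
    return true;
  };
  return GEP->accumulateConstantOffset(DL, Offset, ExternalAnalysis);
}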

bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize(BitWidth, Size, /*isSigned=*/false,
                      /*implicitTrunc=*/true);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle a constant index directly if possible.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero
      // (vscale * n * 0 == 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(
            APInt(BitWidth, SL->getElementOffset(ElementIdx)), 1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
      continue;
    }

    if (STy || ScalableType)
      return false;

    APInt IndexedSize(BitWidth, GTI.getSequentialElementStride(DL),
                      /*isSigned=*/false, /*implicitTrunc=*/true);
    // Insert an initial offset of 0 for V if none exists already, then add
    // IndexedSize to its multiplier.
    if (!IndexedSize.isZero()) {
      auto *It = VariableOffsets.insert({V, APInt(BitWidth, 0)}).first;
      It->second += IndexedSize;
    }
  }
  return true;
}

void FastMathFlags::print(raw_ostream &O) const {
  if (all())
    O << " fast";
  else {
    if (allowReassoc())
      O << " reassoc";
    if (noNaNs())
      O << " nnan";
    if (noInfs())
      O << " ninf";
    if (noSignedZeros())
      O << " nsz";
    if (allowReciprocal())
      O << " arcp";
    if (allowContract())
      O << " contract";
    if (approxFunc())
      O << " afn";
  }
}
} // namespace llvm
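A minimal usage sketch (added for illustration; not part of Operator.cpp): calling GEPOperator::collectOffset, defined above, to split a GEP into a constant byte offset plus a per-value multiplier map. The variables GEP and DL are assumed to be supplied by the surrounding pass, and llvm/ADT/MapVector.h and llvm/Support/raw_ostream.h are assumed to be included.

    // GEP is a const GEPOperator *, DL is the module's DataLayout (assumptions).
    unsigned IdxWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
    SmallMapVector<Value *, APInt, 4> VariableOffsets;
    APInt ConstantOffset(IdxWidth, 0);
    if (GEP->collectOffset(DL, IdxWidth, VariableOffsets, ConstantOffset)) {
      // Total byte offset == ConstantOffset + sum over (V, Scale) of V * Scale.
      for (const auto &[V, Scale] : VariableOffsets)
        errs() << *V << " scaled by " << Scale << "\n";
      errs() << "constant part: " << ConstantOffset << "\n";
    }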
