MLIR: lib/Dialect/Tosa/Utils/ConversionUtils.cpp Source File

//===- ConversionUtils.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utility functions for TOSA lowering
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Tosa/Utils/ConversionUtils.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"

using namespace mlir;
using namespace mlir::tosa;

SmallVector<utils::IteratorType>
mlir::tosa::getNParallelLoopsAttrs(unsigned nParallelLoops) {
  return SmallVector<utils::IteratorType>(nParallelLoops,
                                          utils::IteratorType::parallel);
}

SmallVector<Value> mlir::tosa::condenseValues(const SmallVector<Value> &values) {
  SmallVector<Value> condensedValues;
  for (auto value : values)
    if (value)
      condensedValues.push_back(value);
  return condensedValues;
}

Value mlir::tosa::clampFloatHelper(Location loc, Value arg, Value min,
                                   Value max, OpBuilder &rewriter) {
  Value minValue = rewriter.create<arith::MinimumFOp>(loc, arg, max);
  return rewriter.create<arith::MaximumFOp>(loc, minValue, min);
}

Value mlir::tosa::clampIntHelper(Location loc, Value arg, Value min, Value max,
                                 OpBuilder &rewriter, bool isUnsigned) {
  if (isUnsigned) {
    auto minOrArg = rewriter.create<arith::MaxUIOp>(loc, min, arg);
    return rewriter.create<arith::MinUIOp>(loc, max, minOrArg);
  }
  auto minOrArg = rewriter.create<arith::MaxSIOp>(loc, min, arg);
  return rewriter.create<arith::MinSIOp>(loc, max, minOrArg);
}
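A minimal usage sketch (not part of the file above): clamping a lowered integer value to the signed int8 range with clampIntHelper. The enclosing rewrite pattern, `rewriter`, `loc`, and `value` (an assumed i32 Value) are hypothetical, and `using namespace mlir;` is assumed as in this file; the arith::ConstantIntOp builder taking a value and a bit width is used here.

  Value lo = rewriter.create<arith::ConstantIntOp>(loc, -128, /*width=*/32);
  Value hi = rewriter.create<arith::ConstantIntOp>(loc, 127, /*width=*/32);
  // Clamp `value` into [-128, 127] using signed comparisons.
  Value clamped = tosa::clampIntHelper(loc, value, lo, hi, rewriter,
                                       /*isUnsigned=*/false);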

bool mlir::tosa::validIntegerRange(IntegerType ty, int64_t value) {
  uint64_t bitwidth = ty.getIntOrFloatBitWidth();
  if (ty.getSignedness() == IntegerType::Unsigned) {
    uint64_t uvalue = value;
    APInt intMin = APInt::getMinValue(bitwidth);
    APInt intMax = APInt::getMaxValue(bitwidth);
    return uvalue >= intMin.getZExtValue() && uvalue <= intMax.getZExtValue();
  }

  APInt intMin = APInt::getSignedMinValue(bitwidth);
  APInt intMax = APInt::getSignedMaxValue(bitwidth);
  return value >= intMin.getSExtValue() && value <= intMax.getSExtValue();
}
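A brief example of the check (illustrative only, `ctx` is an assumed MLIRContext*): an explicitly unsigned 8-bit type accepts values in 0..255, so anything larger is rejected.

  auto u8 = IntegerType::get(ctx, 8, IntegerType::Unsigned);
  bool ok = tosa::validIntegerRange(u8, 255);   // true: 255 fits in ui8
  bool bad = tosa::validIntegerRange(u8, 300);  // false: exceeds the ui8 maximum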

namespace {

// Given the shapes of two tensors, the higher-rank one and the lower-rank
// one, compute the shape the lower-rank tensor must be reshaped to so that
// both operands have the same rank and broadcast-compatible dimensions.
// The lower-rank shape is right-aligned against the higher-rank shape and
// padded with leading 1s; for example, a lower shape [c] against a higher
// shape [a, b, c] is reshaped to [1, 1, c].
LogicalResult
computeReshapeOutput(ArrayRef<int64_t> higherRankShape,
                     ArrayRef<int64_t> lowerRankShape,
                     SmallVectorImpl<int64_t> &reshapeOutputShape) {
  // Initialize the output shape with [1] * higherRank.
  int64_t higherRank = higherRankShape.size();
  int64_t lowerRank = lowerRankShape.size();
  reshapeOutputShape.assign(higherRank, 1);

  int64_t higherRankDim;
  int64_t lowerRankDim;
  const int64_t rankDiff = higherRank - lowerRank;

  for (int64_t i = lowerRank - 1; i >= 0; i--) {
    higherRankDim = higherRankShape[i + rankDiff];
    lowerRankDim = lowerRankShape[i];

    // Dimensions are broadcast-compatible if they match or either is 1.
    if (lowerRankDim != 1 && higherRankDim != 1 &&
        lowerRankDim != higherRankDim)
      return failure();

    reshapeOutputShape[i + rankDiff] = lowerRankDim == 1 ? 1 : lowerRankDim;
  }
  return success();
}

} // namespace
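To make the broadcast logic above concrete, here is a worked trace (illustrative only) for higherRankShape = {2, 4, 3} and lowerRankShape = {4, 1}: rankDiff is 1 and the output starts as {1, 1, 1}; at i = 1 the pair (lower 1, higher 3) is compatible, so the last entry stays 1; at i = 0 the pair (lower 4, higher 4) is compatible, so the output becomes {1, 4, 1}. An incompatible pair such as (lower 4, higher 3) would make the function return failure().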

// Common code to create the reshape op where necessary to make the rank of
// two values equal.
LogicalResult mlir::tosa::EqualizeRanks(PatternRewriter &rewriter, Location loc,
                                        Value &input1, Value &input2) {
  ImplicitLocOpBuilder builder(loc, rewriter);
  return EqualizeRanks(builder, input1, input2);
}

LogicalResult mlir::tosa::EqualizeRanks(ImplicitLocOpBuilder &builder,
                                        Value &input1, Value &input2) {
  auto input1Ty = llvm::dyn_cast<RankedTensorType>(input1.getType());
  auto input2Ty = llvm::dyn_cast<RankedTensorType>(input2.getType());

  if (!input1Ty || !input2Ty) {
    return failure();
  }

  int64_t input1Rank = input1Ty.getRank();
  int64_t input2Rank = input2Ty.getRank();

  if (input1Rank == input2Rank)
    return success();

  Value higherTensorValue, lowerTensorValue;
  if (input1Rank > input2Rank) {
    higherTensorValue = input1;
    lowerTensorValue = input2;
  } else {
    higherTensorValue = input2;
    lowerTensorValue = input1;
  }

  ArrayRef<int64_t> higherRankShape =
      llvm::cast<RankedTensorType>(higherTensorValue.getType()).getShape();
  ArrayRef<int64_t> lowerRankShape =
      llvm::cast<RankedTensorType>(lowerTensorValue.getType()).getShape();

  SmallVector<int64_t, 4> reshapeOutputShape;

  if (computeReshapeOutput(higherRankShape, lowerRankShape, reshapeOutputShape)
          .failed())
    return failure();

  auto reshapeInputType =
      llvm::cast<RankedTensorType>(lowerTensorValue.getType());
  auto reshapeOutputType = RankedTensorType::get(
      ArrayRef<int64_t>(reshapeOutputShape), reshapeInputType.getElementType());
  auto reshapeOutputShapeValue = getTosaConstShape(builder, reshapeOutputShape);

  // Reshape the lower-rank operand so both operands end up with equal rank;
  // the reshaped value can then broadcast against the higher-rank operand.
  auto reshapeLower = builder.create<tosa::ReshapeOp>(
      reshapeOutputType, lowerTensorValue, reshapeOutputShapeValue);

  if (input1Rank > input2Rank) {
    input1 = higherTensorValue;
    input2 = reshapeLower.getResult();
  } else {
    input1 = reshapeLower.getResult();
    input2 = higherTensorValue;
  }

  return success();
}
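A hedged usage sketch (not part of the file above) of how a conversion pattern might call EqualizeRanks before building a broadcastable binary op; `rewriter`, `loc`, `lhs`, `rhs`, and `resultTy` are assumed to be provided by the surrounding pattern.

  Value lhsVal = lhs, rhsVal = rhs;
  // Reshape the lower-rank side (if any) so both operands have equal rank.
  if (failed(tosa::EqualizeRanks(rewriter, loc, lhsVal, rhsVal)))
    return failure();
  auto addOp = rewriter.create<tosa::AddOp>(loc, resultTy, lhsVal, rhsVal);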

Value mlir::tosa::getTosaConstShape(ImplicitLocOpBuilder &builder,
                                    llvm::ArrayRef<int64_t> shape) {
  auto attr = builder.getIndexTensorAttr(convertFromMlirShape(shape));
  auto type = mlir::tosa::shapeType::get(builder.getContext(), shape.size());
  mlir::Operation *mlir_op = builder.create<tosa::ConstShapeOp>(type, attr);
  return mlir_op->getResult(0);
}

Value mlir::tosa::getTosaConstShape(PatternRewriter &rewriter, Location loc,
                                    llvm::ArrayRef<int64_t> shape) {
  ImplicitLocOpBuilder builder(loc, rewriter);
  return getTosaConstShape(builder, shape);
}

SmallVector<int64_t> mlir::tosa::convertFromMlirShape(ArrayRef<int64_t> shape) {
  return to_vector(llvm::map_range(shape, [](int64_t dim) {
    return ShapedType::isDynamic(dim) ? -1 : dim;
  }));
}
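A small illustrative sketch (not part of the file above): materializing a !tosa.shape constant and feeding it to tosa.reshape, mirroring what EqualizeRanks does internally. `rewriter`, `loc`, `input`, and `resultTy` (the already-computed reshaped tensor type) are assumed to exist in the surrounding pattern.

  ImplicitLocOpBuilder b(loc, rewriter);
  // Build a tosa.const_shape holding {1, 4, 1}.
  Value newShape = tosa::getTosaConstShape(b, {1, 4, 1});
  auto reshaped = b.create<tosa::ReshapeOp>(resultTy, input, newShape);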

bool mlir::tosa::getConstShapeValues(Operation *op,
                                     llvm::SmallVector<int64_t> &result_shape) {
  if (!op) {
    return false;
  }
  if (auto constOp = mlir::dyn_cast<tosa::ConstShapeOp>(op)) {
    Attribute constOpAttr = constOp->getAttr("values");
    DenseElementsAttr elementsAttr = cast<DenseElementsAttr>(constOpAttr);
    for (int i = 0; i < elementsAttr.size(); i++) {
      int64_t val = elementsAttr.getValues<int64_t>()[i];
      result_shape.push_back(val);
    }
    return true;
  }
  // Any other defining op is not a compile-time constant shape.
  return false;
}

// Returns the int64_t values held by `attr`, expanded to `rank` entries when
// the attribute is a splat.
SmallVector<int64_t> mlir::tosa::convertFromIntAttr(const DenseElementsAttr &attr,
                                                    const int rank) {
  if (attr.isSplat()) {
    int64_t v = attr.getSplatValue<APInt>().getSExtValue();
    return SmallVector<int64_t>(rank, v);
  }

  if (auto int_array_attr = llvm::dyn_cast<DenseIntElementsAttr>(attr)) {
    SmallVector<int64_t> vec;
    for (APInt val : int_array_attr.getValues<APInt>()) {
      vec.push_back(val.getSExtValue());
    }
    return vec;
  }
  return {};
}
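A final illustrative sketch (not part of the file above): recovering the literal dimensions behind a !tosa.shape operand inside a pattern, where `shapeOperand` is an assumed Value (for example the shape operand of a tosa.reshape).

  llvm::SmallVector<int64_t> dims;
  if (!tosa::getConstShapeValues(shapeOperand.getDefiningOp(), dims))
    return failure();  // the shape is not produced by a tosa.const_shape
  // `dims` now holds the constant extents, e.g. {1, 4, 1}.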
