MLIR: include/mlir/ExecutionEngine/CRunnerUtils.h Source File


#ifndef MLIR_EXECUTIONENGINE_CRUNNERUTILS_H
#define MLIR_EXECUTIONENGINE_CRUNNERUTILS_H

#ifdef _WIN32
#ifndef MLIR_CRUNNERUTILS_EXPORT
#ifdef mlir_c_runner_utils_EXPORTS
// We are building this library.
#define MLIR_CRUNNERUTILS_EXPORT __declspec(dllexport)
#define MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
#else
// We are using this library.
#define MLIR_CRUNNERUTILS_EXPORT __declspec(dllimport)
#endif // mlir_c_runner_utils_EXPORTS
#endif // MLIR_CRUNNERUTILS_EXPORT
#else
// Non-Windows: use default symbol visibility.
#define MLIR_CRUNNERUTILS_EXPORT __attribute__((visibility("default")))
#define MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
#endif // _WIN32

#include <array>
#include <cassert>
#include <cstdint>
#include <initializer_list>
#include <vector>

//===----------------------------------------------------------------------===//
// Codegen-compatible structures for Vector type.
//===----------------------------------------------------------------------===//

namespace mlir {
namespace detail {

constexpr bool isPowerOf2(int n) { return (!(n & (n - 1))); }

constexpr unsigned nextPowerOf2(int n) {
  return (n <= 1) ? 1 : (isPowerOf2(n) ? n : 2 * nextPowerOf2((n + 1) / 2));
}

template <typename T, int Dim, bool IsPowerOf2>
struct Vector1D;

// 1-D vector whose natural size is already a power of 2: no padding needed.
template <typename T, int Dim>
struct Vector1D<T, Dim, /*IsPowerOf2=*/true> {
  Vector1D() {
    static_assert(nextPowerOf2(sizeof(T[Dim])) == sizeof(T[Dim]),
                  "size error");
  }
  inline T &operator[](unsigned i) { return vector[i]; }
  inline const T &operator[](unsigned i) const { return vector[i]; }

private:
  T vector[Dim];
};

// 1-D vector padded up to the next power-of-2 size.
template <typename T, int Dim>
struct Vector1D<T, Dim, /*IsPowerOf2=*/false> {
  Vector1D() {
    static_assert(nextPowerOf2(sizeof(T[Dim])) > sizeof(T[Dim]), "size error");
    static_assert(nextPowerOf2(sizeof(T[Dim])) < 2 * sizeof(T[Dim]),
                  "size error");
  }
  inline T &operator[](unsigned i) { return vector[i]; }
  inline const T &operator[](unsigned i) const { return vector[i]; }

private:
  T vector[Dim];
  char padding[nextPowerOf2(sizeof(T[Dim])) - sizeof(T[Dim])];
};
} // namespace detail
} // namespace mlir

// N-dimensional vectors recurse down to 1-D.
template <typename T, int Dim, int... Dims>
struct Vector {
  inline Vector<T, Dims...> &operator[](unsigned i) { return vector[i]; }
  inline const Vector<T, Dims...> &operator[](unsigned i) const {
    return vector[i];
  }

private:
  Vector<T, Dims...> vector[Dim];
};

// 1-D vectors dispatch to the Vector1D helper, which pads the storage to a
// power-of-2 size when needed so the layout matches what codegen expects.
template <typename T, int Dim>
struct Vector<T, Dim>
    : public mlir::detail::Vector1D<T, Dim,
                                    mlir::detail::isPowerOf2(sizeof(T[Dim]))> {
};

template <int D1, typename T>
using Vector1D = Vector<T, D1>;
template <int D1, int D2, typename T>
using Vector2D = Vector<T, D1, D2>;
template <int D1, int D2, int D3, typename T>
using Vector3D = Vector<T, D1, D2, D3>;
template <int D1, int D2, int D3, int D4, typename T>
using Vector4D = Vector<T, D1, D2, D3, D4>;

template <int N>
void dropFront(int64_t arr[N], int64_t *res) {
  for (unsigned i = 1; i < N; ++i)
    *(res + i - 1) = arr[i];
}

//===----------------------------------------------------------------------===//
// Codegen-compatible structures for StridedMemRef type.
//===----------------------------------------------------------------------===//
template <typename T, int Rank>
class StridedMemrefIterator;

/// StridedMemRef descriptor type with static rank.
template <typename T, int N>
struct StridedMemRefType {
  T *basePtr;
  T *data;
  int64_t offset;
  int64_t sizes[N];
  int64_t strides[N];

  template <typename Range,
            typename sfinae = decltype(std::declval<Range>().begin())>
  T &operator[](Range &&indices) {
    assert(indices.size() == N &&
           "indices should match rank in memref subscript");
    int64_t curOffset = offset;
    for (int dim = N - 1; dim >= 0; --dim) {
      int64_t currentIndex = *(indices.begin() + dim);
      assert(currentIndex < sizes[dim] && "Index overflow");
      curOffset += currentIndex * strides[dim];
    }
    return data[curOffset];
  }

  StridedMemrefIterator<T, N> begin() { return {*this, offset}; }
  StridedMemrefIterator<T, N> end() { return {*this, -1}; }

  // This operator[] is extremely slow and only for sugaring purposes.
  StridedMemRefType<T, N - 1> operator[](int64_t idx) {
    StridedMemRefType<T, N - 1> res;
    res.basePtr = basePtr;
    res.data = data;
    res.offset = offset + idx * strides[0];
    dropFront<N>(sizes, res.sizes);
    dropFront<N>(strides, res.strides);
    return res;
  }
};

/// StridedMemRef descriptor type specialized for rank 1.
template <typename T>
struct StridedMemRefType<T, 1> {
  T *basePtr;
  T *data;
  int64_t offset;
  int64_t sizes[1];
  int64_t strides[1];

  template <typename Range,
            typename sfinae = decltype(std::declval<Range>().begin())>
  T &operator[](Range indices) {
    assert(indices.size() == 1 &&
           "indices should match rank in memref subscript");
    return (*this)[*indices.begin()];
  }

  StridedMemrefIterator<T, 1> begin() { return {*this, offset}; }
  StridedMemrefIterator<T, 1> end() { return {*this, -1}; }

  T &operator[](int64_t idx) { return *(data + offset + idx * strides[0]); }
};

/// StridedMemRef descriptor type specialized for rank 0.
template <typename T>
struct StridedMemRefType<T, 0> {
  T *basePtr;
  T *data;
  int64_t offset;

  template <typename Range,
            typename sfinae = decltype(std::declval<Range>().begin())>
  T &operator[](Range indices) {
    assert((indices.size() == 0) &&
           "Expect empty indices for 0-rank memref subscript");
    return data[offset];
  }

  StridedMemrefIterator<T, 0> begin() { return {*this, offset}; }
  StridedMemrefIterator<T, 0> end() { return {*this, offset + 1}; }
};

/// Iterate over all elements in a strided memref.
template <typename T, int Rank>
class StridedMemrefIterator {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = T;
  using difference_type = std::ptrdiff_t;
  using pointer = T *;
  using reference = T &;

  StridedMemrefIterator(StridedMemRefType<T, Rank> &descriptor,
                        int64_t offset = 0)
      : offset(offset), descriptor(&descriptor) {}

  StridedMemrefIterator<T, Rank> &operator++() {
    int dim = Rank - 1;
    while (dim >= 0 && indices[dim] == (descriptor->sizes[dim] - 1)) {
      offset -= indices[dim] * descriptor->strides[dim];
      indices[dim] = 0;
      --dim;
    }
    if (dim < 0) {
      offset = -1;
      return *this;
    }
    ++indices[dim];
    offset += descriptor->strides[dim];
    return *this;
  }

  reference operator*() { return descriptor->data[offset]; }
  pointer operator->() { return &descriptor->data[offset]; }

  const std::array<int64_t, Rank> &getIndices() { return indices; }

  bool operator==(const StridedMemrefIterator &other) const {
    return other.offset == offset && other.descriptor == descriptor;
  }

  bool operator!=(const StridedMemrefIterator &other) const {
    return !(*this == other);
  }

private:
  /// Offset in the buffer. This can be derived from the indices and the
  /// descriptor.
  int64_t offset = 0;

  /// Array of indices in the multi-dimensional memref.
  std::array<int64_t, Rank> indices = {};

  /// Descriptor for the strided memref.
  StridedMemRefType<T, Rank> *descriptor;
};

/// Iterate over all elements in a 0-ranked strided memref.
template <typename T>
class StridedMemrefIterator<T, 0> {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = T;
  using difference_type = std::ptrdiff_t;
  using pointer = T *;
  using reference = T &;

  StridedMemrefIterator(StridedMemRefType<T, 0> &descriptor, int64_t offset = 0)
      : elt(descriptor.data + offset) {}

  StridedMemrefIterator<T, 0> &operator++() {
    ++elt;
    return *this;
  }

  reference operator*() { return *elt; }
  pointer operator->() { return elt; }

  // A 0-ranked memref has no indices; return an empty array so generic code
  // can still call getIndices().
  const std::array<int64_t, 0> &getIndices() {
    static const std::array<int64_t, 0> indices = {};
    return indices;
  }

  bool operator==(const StridedMemrefIterator &other) const {
    return other.elt == elt;
  }

  bool operator!=(const StridedMemrefIterator &other) const {
    return !(*this == other);
  }

private:
  /// Pointer to the single element in the zero-ranked memref.
  T *elt;
};

//===----------------------------------------------------------------------===//
// Codegen-compatible structure for UnrankedMemRef type.
//===----------------------------------------------------------------------===//
// Unranked MemRef: a runtime rank plus a pointer to the ranked descriptor.
template <typename T>
struct UnrankedMemRefType {
  int64_t rank;
  void *descriptor;
};

//===----------------------------------------------------------------------===//
// DynamicMemRefType type.
//===----------------------------------------------------------------------===//
template <typename T>
class DynamicMemRefIterator;

// A type-erased (runtime-rank) reference to one of the StridedMemRef types.
template <typename T>
class DynamicMemRefType {
public:
  int64_t rank;
  T *basePtr;
  T *data;
  int64_t offset;
  const int64_t *sizes;
  const int64_t *strides;

  explicit DynamicMemRefType(const StridedMemRefType<T, 0> &memRef)
      : rank(0), basePtr(memRef.basePtr), data(memRef.data),
        offset(memRef.offset), sizes(nullptr), strides(nullptr) {}
  template <int N>
  explicit DynamicMemRefType(const StridedMemRefType<T, N> &memRef)
      : rank(N), basePtr(memRef.basePtr), data(memRef.data),
        offset(memRef.offset), sizes(memRef.sizes), strides(memRef.strides) {}
  explicit DynamicMemRefType(const ::UnrankedMemRefType<T> &memRef)
      : rank(memRef.rank) {
    auto *desc = static_cast<StridedMemRefType<T, 1> *>(memRef.descriptor);
    basePtr = desc->basePtr;
    data = desc->data;
    offset = desc->offset;
    sizes = rank == 0 ? nullptr : desc->sizes;
    strides = sizes + rank;
  }

  template <typename Range,
            typename sfinae = decltype(std::declval<Range>().begin())>
  T &operator[](Range &&indices) {
    assert(indices.size() == rank &&
           "indices should match rank in memref subscript");
    if (rank == 0)
      return data[offset];

    int64_t curOffset = offset;
    for (int dim = rank - 1; dim >= 0; --dim) {
      int64_t currentIndex = *(indices.begin() + dim);
      assert(currentIndex < sizes[dim] && "Index overflow");
      curOffset += currentIndex * strides[dim];
    }
    return data[curOffset];
  }

  DynamicMemRefIterator<T> begin() { return {*this, offset}; }
  DynamicMemRefIterator<T> end() { return {*this, -1}; }

  // This operator[] is extremely slow and only for sugaring purposes.
  DynamicMemRefType<T> operator[](int64_t idx) {
    assert(rank > 0 && "can't make a subscript of a zero ranked array");

    DynamicMemRefType<T> res = *this;
    --res.rank;
    res.offset += idx * res.strides[0];
    ++res.sizes;
    ++res.strides;
    return res;
  }

  // This operator* is extremely slow and only for sugaring purposes.
  T &operator*() {
    assert(rank == 0 && "not a zero-ranked memRef");
    return data[offset];
  }
};

/// Iterate over all elements in a dynamic memref.
template <typename T>
class DynamicMemRefIterator {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = T;
  using difference_type = std::ptrdiff_t;
  using pointer = T *;
  using reference = T &;

  DynamicMemRefIterator(DynamicMemRefType<T> &descriptor, int64_t offset = 0)
      : offset(offset), descriptor(&descriptor) {
    indices.resize(descriptor.rank, 0);
  }

  DynamicMemRefIterator<T> &operator++() {
    if (descriptor->rank == 0) {
      offset = -1;
      return *this;
    }

    int dim = descriptor->rank - 1;

    while (dim >= 0 && indices[dim] == (descriptor->sizes[dim] - 1)) {
      offset -= indices[dim] * descriptor->strides[dim];
      indices[dim] = 0;
      --dim;
    }

    if (dim < 0) {
      offset = -1;
      return *this;
    }

    ++indices[dim];
    offset += descriptor->strides[dim];
    return *this;
  }

  reference operator*() { return descriptor->data[offset]; }
  pointer operator->() { return &descriptor->data[offset]; }

  const std::vector<int64_t> &getIndices() { return indices; }

  bool operator==(const DynamicMemRefIterator &other) const {
    return other.offset == offset && other.descriptor == descriptor;
  }

  bool operator!=(const DynamicMemRefIterator &other) const {
    return !(*this == other);
  }

private:
  /// Offset in the buffer. This can be derived from the indices and the
  /// descriptor.
  int64_t offset = 0;

  /// Array of indices in the multi-dimensional memref.
  std::vector<int64_t> indices = {};

  /// Descriptor for the dynamic memref.
  DynamicMemRefType<T> *descriptor;
};

//===----------------------------------------------------------------------===//
// Small runtime support library for memref.copy lowering during codegen.
//===----------------------------------------------------------------------===//
extern "C" MLIR_CRUNNERUTILS_EXPORT void
memrefCopy(int64_t elemSize, ::UnrankedMemRefType<char> *src,
           ::UnrankedMemRefType<char> *dst);

//===----------------------------------------------------------------------===//
// Small runtime support library for vector.print lowering during codegen.
//===----------------------------------------------------------------------===//
extern "C" MLIR_CRUNNERUTILS_EXPORT void printI64(int64_t i);
extern "C" MLIR_CRUNNERUTILS_EXPORT void printU64(uint64_t u);
extern "C" MLIR_CRUNNERUTILS_EXPORT void printF32(float f);
extern "C" MLIR_CRUNNERUTILS_EXPORT void printF64(double d);
extern "C" MLIR_CRUNNERUTILS_EXPORT void printString(char const *s);
extern "C" MLIR_CRUNNERUTILS_EXPORT void printOpen();
extern "C" MLIR_CRUNNERUTILS_EXPORT void printClose();
extern "C" MLIR_CRUNNERUTILS_EXPORT void printComma();
extern "C" MLIR_CRUNNERUTILS_EXPORT void printNewline();

//===----------------------------------------------------------------------===//
// Small runtime support library for timing execution and printing GFLOPS.
//===----------------------------------------------------------------------===//
extern "C" MLIR_CRUNNERUTILS_EXPORT void printFlops(double flops);
extern "C" MLIR_CRUNNERUTILS_EXPORT double rtclock();

//===----------------------------------------------------------------------===//
// Runtime support library for random number generation and shuffling.
//===----------------------------------------------------------------------===//
// Create a generator seeded with `s`, draw uniform numbers bounded by `m`,
// shuffle a rank-1 memref in place, and destroy the generator.
extern "C" MLIR_CRUNNERUTILS_EXPORT void *rtsrand(uint64_t s);
extern "C" MLIR_CRUNNERUTILS_EXPORT uint64_t rtrand(void *g, uint64_t m);
extern "C" MLIR_CRUNNERUTILS_EXPORT void rtdrand(void *g);
extern "C" MLIR_CRUNNERUTILS_EXPORT void
_mlir_ciface_shuffle(StridedMemRefType<uint64_t, 1> *mref, void *g);

//===----------------------------------------------------------------------===//
// Runtime support library to allow the use of std::sort in MLIR programs.
//===----------------------------------------------------------------------===//
extern "C" MLIR_CRUNNERUTILS_EXPORT void
_mlir_ciface_stdSortI64(uint64_t n, StridedMemRefType<int64_t, 1> *vref);
extern "C" MLIR_CRUNNERUTILS_EXPORT void
_mlir_ciface_stdSortF64(uint64_t n, StridedMemRefType<double, 1> *vref);
extern "C" MLIR_CRUNNERUTILS_EXPORT void
_mlir_ciface_stdSortF32(uint64_t n, StridedMemRefType<float, 1> *vref);

#endif // MLIR_EXECUTIONENGINE_CRUNNERUTILS_H
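Taken together, the descriptor structs and the exported C API can be exercised directly from host C++ code. The sketch below is illustrative only: it hand-builds two rank-1 descriptors over stack buffers (values are made up), wraps them in UnrankedMemRefType<char>, and copies one into the other through memrefCopy. It assumes the translation unit links against the C runner utils library (typically libmlir_c_runner_utils).

#include "mlir/ExecutionEngine/CRunnerUtils.h"

int main() {
  float src[4] = {1.f, 2.f, 3.f, 4.f};
  float dst[4] = {};

  // Hand-built rank-1 descriptors: {basePtr, data, offset, sizes, strides}.
  StridedMemRefType<float, 1> srcRef{src, src, 0, {4}, {1}};
  StridedMemRefType<float, 1> dstRef{dst, dst, 0, {4}, {1}};

  // memrefCopy takes type-erased views: a rank plus a pointer to the ranked
  // descriptor.
  UnrankedMemRefType<char> srcU{1, &srcRef};
  UnrankedMemRefType<char> dstU{1, &dstRef};
  memrefCopy(sizeof(float), &srcU, &dstU);
  return 0;
}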

Summary of the C API and helpers declared in this file:

Export macro:
  #define MLIR_CRUNNERUTILS_EXPORT  (dllexport/dllimport on Windows, default visibility elsewhere)

memref.copy support:
  MLIR_CRUNNERUTILS_EXPORT void memrefCopy(int64_t elemSize, ::UnrankedMemRefType<char> *src, ::UnrankedMemRefType<char> *dst)

vector.print support:
  MLIR_CRUNNERUTILS_EXPORT void printI64(int64_t i)
  MLIR_CRUNNERUTILS_EXPORT void printU64(uint64_t u)
  MLIR_CRUNNERUTILS_EXPORT void printF32(float f)
  MLIR_CRUNNERUTILS_EXPORT void printF64(double d)
  MLIR_CRUNNERUTILS_EXPORT void printString(char const *s)
  MLIR_CRUNNERUTILS_EXPORT void printOpen()
  MLIR_CRUNNERUTILS_EXPORT void printClose()
  MLIR_CRUNNERUTILS_EXPORT void printComma()
  MLIR_CRUNNERUTILS_EXPORT void printNewline()

Timing:
  MLIR_CRUNNERUTILS_EXPORT void printFlops(double flops)
  MLIR_CRUNNERUTILS_EXPORT double rtclock()

Random number generation and shuffling:
  MLIR_CRUNNERUTILS_EXPORT void *rtsrand(uint64_t s)
  MLIR_CRUNNERUTILS_EXPORT uint64_t rtrand(void *g, uint64_t m)
  MLIR_CRUNNERUTILS_EXPORT void rtdrand(void *g)
  MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_shuffle(StridedMemRefType<uint64_t, 1> *mref, void *g)

std::sort support:
  MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_stdSortI64(uint64_t n, StridedMemRefType<int64_t, 1> *vref)
  MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_stdSortF64(uint64_t n, StridedMemRefType<double, 1> *vref)
  MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_stdSortF32(uint64_t n, StridedMemRefType<float, 1> *vref)

Helper:
  template <int N> void dropFront(int64_t arr[N], int64_t *res)
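The exported functions above are plain C symbols, so they can be called like any other C function from host code. A minimal, illustrative sketch that times a loop with rtclock() and reports values through the print helpers (the workload and all values are made up):

#include "mlir/ExecutionEngine/CRunnerUtils.h"

int main() {
  double start = rtclock(); // wall-clock timestamp, in seconds in the reference runtime

  int64_t n = 1 << 20;
  double acc = 0.0;
  for (int64_t i = 0; i < n; ++i) // one add per iteration, just to have work to time
    acc += static_cast<double>(i);

  double elapsed = rtclock() - start;

  printString("accumulated: ");
  printF64(acc);
  printNewline();

  // printFlops takes an ops-per-second figure and reports it as GFLOPS.
  printFlops(static_cast<double>(n) / elapsed);
  return 0;
}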

DynamicMemRefIterator<T> — Iterate over all elements in a dynamic memref.
  DynamicMemRefIterator(DynamicMemRefType<T> &descriptor, int64_t offset = 0)
  DynamicMemRefIterator<T> &operator++()
  bool operator==(const DynamicMemRefIterator &other) const
  bool operator!=(const DynamicMemRefIterator &other) const
  const std::vector<int64_t> &getIndices()
  std::forward_iterator_tag iterator_category
  std::ptrdiff_t difference_type

DynamicMemRefType<T> — Runtime-rank view over a strided or unranked memref descriptor.
  DynamicMemRefType(const StridedMemRefType<T, 0> &memRef)
  DynamicMemRefType(const StridedMemRefType<T, N> &memRef)
  DynamicMemRefType(const ::UnrankedMemRefType<T> &memRef)
  DynamicMemRefIterator<T> begin()
  DynamicMemRefIterator<T> end()
  T &operator[](Range &&indices)
  DynamicMemRefType<T> operator[](int64_t idx)
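DynamicMemRefType<T> erases the static rank: it can be constructed from any StridedMemRefType<T, N> or from an UnrankedMemRefType<T>, and then subscripted or iterated with the rank known only at run time. An illustrative sketch over a hand-built 2x3 row-major view (the buffer, sizes, and strides are made up for the example):

#include "mlir/ExecutionEngine/CRunnerUtils.h"

#include <array>
#include <cstdio>

int main() {
  float buf[6] = {0, 1, 2, 3, 4, 5};

  // Hand-built 2x3 row-major view over `buf`.
  StridedMemRefType<float, 2> ref;
  ref.basePtr = buf;
  ref.data = buf;
  ref.offset = 0;
  ref.sizes[0] = 2;
  ref.sizes[1] = 3;
  ref.strides[0] = 3;
  ref.strides[1] = 1;

  DynamicMemRefType<float> dyn(ref); // rank becomes a runtime value (2)

  std::array<int64_t, 2> idx = {1, 2};
  std::printf("dyn[1][2] = %g\n", dyn[idx]); // range subscript -> 5

  for (float &elt : dyn) // DynamicMemRefIterator visits every element
    elt *= 2.0f;
  return 0;
}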

StridedMemrefIterator<T, 0> — Iterate over all elements in a 0-ranked strided memref.
  StridedMemrefIterator(StridedMemRefType<T, 0> &descriptor, int64_t offset = 0)
  StridedMemrefIterator<T, 0> &operator++()
  bool operator==(const StridedMemrefIterator &other) const
  bool operator!=(const StridedMemrefIterator &other) const
  const std::array<int64_t, 0> &getIndices()
  std::forward_iterator_tag iterator_category
  std::ptrdiff_t difference_type

StridedMemrefIterator<T, Rank> — Iterate over all elements in a strided memref.
  StridedMemrefIterator(StridedMemRefType<T, Rank> &descriptor, int64_t offset = 0)
  StridedMemrefIterator<T, Rank> &operator++()
  bool operator==(const StridedMemrefIterator &other) const
  bool operator!=(const StridedMemrefIterator &other) const
  const std::array<int64_t, Rank> &getIndices()
  std::forward_iterator_tag iterator_category
  std::ptrdiff_t difference_type
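The iterator advances in row-major order (last dimension fastest), keeps the running linear offset in sync with the strides, and exposes the current multi-index through getIndices(). A small illustrative sketch over a hand-built 2x2 descriptor (values are made up):

#include "mlir/ExecutionEngine/CRunnerUtils.h"

#include <cstdio>

int main() {
  int64_t buf[4] = {10, 20, 30, 40};

  // Hand-built 2x2 row-major view over `buf`.
  StridedMemRefType<int64_t, 2> ref;
  ref.basePtr = buf;
  ref.data = buf;
  ref.offset = 0;
  ref.sizes[0] = 2;
  ref.sizes[1] = 2;
  ref.strides[0] = 2;
  ref.strides[1] = 1;

  for (auto it = ref.begin(); it != ref.end(); ++it) {
    const auto &idx = it.getIndices(); // current multi-index
    std::printf("(%lld, %lld) = %lld\n", (long long)idx[0], (long long)idx[1],
                (long long)*it);
  }
  return 0;
}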

mlir::detail helpers:
  constexpr bool isPowerOf2(int n)
  constexpr unsigned nextPowerOf2(int n)

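These two helpers drive the padding decision in Vector1D: when sizeof(T[Dim]) is not a power of two, the 1-D storage is padded so the whole object occupies nextPowerOf2(sizeof(T[Dim])) bytes. A small sketch of the resulting sizes; the concrete figures below follow from that rule (e.g. five floats hold 20 data bytes padded up to 32):

#include "mlir/ExecutionEngine/CRunnerUtils.h"

// 5 floats = 20 bytes of data, padded up to the next power of 2 (32 bytes).
static_assert(sizeof(Vector1D<5, float>) == 32, "padded to next power of 2");
// 4 floats = 16 bytes, already a power of 2, so no padding is added.
static_assert(sizeof(Vector1D<4, float>) == 16, "no padding needed");

int main() { return 0; }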

StridedMemRefType<T, 0> — StridedMemRef descriptor type specialized for rank 0.
  T &operator[](Range indices)
  StridedMemrefIterator<T, 0> begin()
  StridedMemrefIterator<T, 0> end()

StridedMemRefType<T, 1> — StridedMemRef descriptor type specialized for rank 1.
  T &operator[](Range indices)
  T &operator[](int64_t idx)
  StridedMemrefIterator<T, 1> begin()
  StridedMemrefIterator<T, 1> end()

StridedMemRefType<T, N> — StridedMemRef descriptor type with static rank.
  T &operator[](Range &&indices)
  StridedMemRefType<T, N - 1> operator[](int64_t idx)
  StridedMemrefIterator<T, N> begin()
  StridedMemrefIterator<T, N> end()
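Range subscripting computes offset + sum(index_d * stride_d), while the int64_t overload peels off the leading dimension and returns a rank-reduced descriptor (the header itself notes this form is slow and only meant as sugar). An illustrative sketch over a hand-built 2x3 row-major view (values are made up):

#include "mlir/ExecutionEngine/CRunnerUtils.h"

#include <array>
#include <cstdio>

int main() {
  double buf[6] = {0, 1, 2, 3, 4, 5};

  // Hand-built 2x3 row-major view over `buf`.
  StridedMemRefType<double, 2> ref;
  ref.basePtr = buf;
  ref.data = buf;
  ref.offset = 0;
  ref.sizes[0] = 2;
  ref.sizes[1] = 3;
  ref.strides[0] = 3;
  ref.strides[1] = 1;

  // Range subscript: offset + 1*3 + 2*1 = 5.
  std::printf("%g\n", ref[std::array<int64_t, 2>{1, 2}]);

  // int64_t subscript drops the leading dimension: `row` is a rank-1 view.
  StridedMemRefType<double, 1> row = ref[1];
  std::printf("%g\n", row[2]); // same element, 5
  return 0;
}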

Vector<T, Dim, Dims...> — Codegen-compatible N-D vector type.
  Vector<T, Dims...> &operator[](unsigned i)
  const Vector<T, Dims...> &operator[](unsigned i) const

mlir::detail::Vector1D<T, Dim, IsPowerOf2> — 1-D storage, padded to a power-of-2 size when needed.
  T &operator[](unsigned i)
  const T &operator[](unsigned i) const
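The Vector aliases give C-compatible N-D vector storage with power-of-2 padding on the innermost dimension; indexing simply recurses one dimension at a time. A short illustrative sketch:

#include "mlir/ExecutionEngine/CRunnerUtils.h"

int main() {
  Vector2D<2, 3, float> m; // Vector<float, 2, 3>: 2 rows of (padded) 3-float vectors
  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 3; ++j)
      m[i][j] = static_cast<float>(i * 3 + j); // row-major fill
  return 0;
}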