LLVM: lib/Support/BLAKE3/blake3_avx512.c Source File

#include "blake3_impl.h"

#include <immintrin.h>

#define _mm_shuffle_ps2(a, b, c)                                               \
  (_mm_castps_si128(                                                           \
      _mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), (c))))
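// Note: _mm_shuffle_ps2 leans on the float shuffle because, unlike
// _mm_shuffle_epi32, _mm_shuffle_ps can select two lanes from each of two
// different source vectors; the casts are type-level only and cost nothing.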

INLINE __m128i loadu_128(const uint8_t src[16]) {
  return _mm_loadu_si128((const __m128i *)src);
}

INLINE __m256i loadu_256(const uint8_t src[32]) {
  return _mm256_loadu_si256((const __m256i *)src);
}

INLINE __m512i loadu_512(const uint8_t src[64]) {
  return _mm512_loadu_si512((const __m512i *)src);
}

INLINE void storeu_128(__m128i src, uint8_t dest[16]) {
  _mm_storeu_si128((__m128i *)dest, src);
}

INLINE void storeu_256(__m256i src, uint8_t dest[32]) {
  _mm256_storeu_si256((__m256i *)dest, src);
}

INLINE void storeu_512(__m512i src, uint8_t dest[64]) {
  _mm512_storeu_si512((__m512i *)dest, src);
}

INLINE __m128i add_128(__m128i a, __m128i b) { return _mm_add_epi32(a, b); }

INLINE __m256i add_256(__m256i a, __m256i b) { return _mm256_add_epi32(a, b); }

INLINE __m512i add_512(__m512i a, __m512i b) { return _mm512_add_epi32(a, b); }

INLINE __m128i xor_128(__m128i a, __m128i b) { return _mm_xor_si128(a, b); }

INLINE __m256i xor_256(__m256i a, __m256i b) { return _mm256_xor_si256(a, b); }

INLINE __m512i xor_512(__m512i a, __m512i b) { return _mm512_xor_si512(a, b); }

INLINE __m128i set1_128(uint32_t x) { return _mm_set1_epi32((int32_t)x); }

INLINE __m256i set1_256(uint32_t x) { return _mm256_set1_epi32((int32_t)x); }

INLINE __m512i set1_512(uint32_t x) { return _mm512_set1_epi32((int32_t)x); }

INLINE __m128i set4(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
  return _mm_setr_epi32((int32_t)a, (int32_t)b, (int32_t)c, (int32_t)d);
}

INLINE __m128i rot16_128(__m128i x) { return _mm_ror_epi32(x, 16); }

INLINE __m256i rot16_256(__m256i x) { return _mm256_ror_epi32(x, 16); }

INLINE __m512i rot16_512(__m512i x) { return _mm512_ror_epi32(x, 16); }

INLINE __m128i rot12_128(__m128i x) { return _mm_ror_epi32(x, 12); }

INLINE __m256i rot12_256(__m256i x) { return _mm256_ror_epi32(x, 12); }

INLINE __m512i rot12_512(__m512i x) { return _mm512_ror_epi32(x, 12); }

INLINE __m128i rot8_128(__m128i x) { return _mm_ror_epi32(x, 8); }

INLINE __m256i rot8_256(__m256i x) { return _mm256_ror_epi32(x, 8); }

INLINE __m512i rot8_512(__m512i x) { return _mm512_ror_epi32(x, 8); }

INLINE __m128i rot7_128(__m128i x) { return _mm_ror_epi32(x, 7); }

INLINE __m256i rot7_256(__m256i x) { return _mm256_ror_epi32(x, 7); }

INLINE __m512i rot7_512(__m512i x) { return _mm512_ror_epi32(x, 7); }
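// With AVX-512VL, _mm_ror_epi32 / _mm256_ror_epi32 / _mm512_ror_epi32 each
// compile to a single vprord at every vector width, which is why this one
// file also carries 128-bit and 256-bit code paths: the SSE4.1 and AVX2
// implementations must emulate these rotates with shift or shuffle sequences.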

/*
 * ----------------------------------------------------------------------------
 * compress_avx512
 * ----------------------------------------------------------------------------
 */

INLINE void g1(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
               __m128i m) {
  *row0 = add_128(add_128(*row0, m), *row1);
  *row3 = xor_128(*row3, *row0);
  *row3 = rot16_128(*row3);
  *row2 = add_128(*row2, *row3);
  *row1 = xor_128(*row1, *row2);
  *row1 = rot12_128(*row1);
}

INLINE void g2(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
               __m128i m) {
  *row0 = add_128(add_128(*row0, m), *row1);
  *row3 = xor_128(*row3, *row0);
  *row3 = rot8_128(*row3);
  *row2 = add_128(*row2, *row3);
  *row1 = xor_128(*row1, *row2);
  *row1 = rot7_128(*row1);
}
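// g1 and g2 above are the two halves of the BLAKE3 G function: each adds a
// message word and the mixing partner into the row, then XOR-rotates, using
// the rotation constants 16/12 (g1) and 8/7 (g2).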

// Note the optimization here of leaving row1 as the unrotated row, rather than
// row0. All the message loads below are adjusted to compensate for this. See
// discussion at https://github.com/sneves/blake2-avx2/pull/4
INLINE void diagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
  *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(2, 1, 0, 3));
  *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
  *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(0, 3, 2, 1));
}

INLINE void undiagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
  *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(0, 3, 2, 1));
  *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
  *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(2, 1, 0, 3));
}

INLINE void compress_pre(__m128i rows[4], const uint32_t cv[8],
                         const uint8_t block[BLAKE3_BLOCK_LEN],
                         uint8_t block_len, uint64_t counter, uint8_t flags) {
  rows[0] = loadu_128((uint8_t *)&cv[0]);
  rows[1] = loadu_128((uint8_t *)&cv[4]);
  rows[2] = set4(IV[0], IV[1], IV[2], IV[3]);
  rows[3] = set4(counter_low(counter), counter_high(counter),
                 (uint32_t)block_len, (uint32_t)flags);

  __m128i m0 = loadu_128(&block[sizeof(__m128i) * 0]);
  __m128i m1 = loadu_128(&block[sizeof(__m128i) * 1]);
  __m128i m2 = loadu_128(&block[sizeof(__m128i) * 2]);
  __m128i m3 = loadu_128(&block[sizeof(__m128i) * 3]);

  __m128i t0, t1, t2, t3, tt;

  // Round 1. The first round permutes the message words from the original
  // input order, into the groups that get mixed in parallel.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(2, 0, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 3, 1));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(2, 0, 2, 0));
  t2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2, 1, 0, 3));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 1, 3, 1));
  t3 = _mm_shuffle_epi32(t3, _MM_SHUFFLE(2, 1, 0, 3));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 2. This round and all following rounds apply a fixed permutation
  // to the message words from the round before.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 3.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 4.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 5.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 6.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 7.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
}

void blake3_compress_xof_avx512(const uint32_t cv[8],
                                const uint8_t block[BLAKE3_BLOCK_LEN],
                                uint8_t block_len, uint64_t counter,
                                uint8_t flags, uint8_t out[64]) {
  __m128i rows[4];
  compress_pre(rows, cv, block, block_len, counter, flags);
  storeu_128(xor_128(rows[0], rows[2]), &out[0]);
  storeu_128(xor_128(rows[1], rows[3]), &out[16]);
  storeu_128(xor_128(rows[2], loadu_128((uint8_t *)&cv[0])), &out[32]);
  storeu_128(xor_128(rows[3], loadu_128((uint8_t *)&cv[4])), &out[48]);
}

void blake3_compress_in_place_avx512(uint32_t cv[8],
                                     const uint8_t block[BLAKE3_BLOCK_LEN],
                                     uint8_t block_len, uint64_t counter,
                                     uint8_t flags) {
  __m128i rows[4];
  compress_pre(rows, cv, block, block_len, counter, flags);
  storeu_128(xor_128(rows[0], rows[2]), (uint8_t *)&cv[0]);
  storeu_128(xor_128(rows[1], rows[3]), (uint8_t *)&cv[4]);
}
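// Layout note: the 64-byte XOF output above is the full extended state. The
// first 32 bytes are the normal compression output (rows 0 and 1 XORed with
// rows 2 and 3), and the last 32 bytes XOR the input chaining value back into
// rows 2 and 3, which is what lets the XOF produce more than 32 bytes.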

/*
 * ----------------------------------------------------------------------------
 * hash4_avx512
 * ----------------------------------------------------------------------------
 */

INLINE void round_fn4(__m128i v[16], __m128i m[16], size_t r) {
  v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
  v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
  v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
  v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
  v[0] = add_128(v[0], v[4]);
  v[1] = add_128(v[1], v[5]);
  v[2] = add_128(v[2], v[6]);
  v[3] = add_128(v[3], v[7]);
  v[12] = xor_128(v[12], v[0]);
  v[13] = xor_128(v[13], v[1]);
  v[14] = xor_128(v[14], v[2]);
  v[15] = xor_128(v[15], v[3]);
  v[12] = rot16_128(v[12]);
  v[13] = rot16_128(v[13]);
  v[14] = rot16_128(v[14]);
  v[15] = rot16_128(v[15]);
  v[8] = add_128(v[8], v[12]);
  v[9] = add_128(v[9], v[13]);
  v[10] = add_128(v[10], v[14]);
  v[11] = add_128(v[11], v[15]);
  v[4] = xor_128(v[4], v[8]);
  v[5] = xor_128(v[5], v[9]);
  v[6] = xor_128(v[6], v[10]);
  v[7] = xor_128(v[7], v[11]);
  v[4] = rot12_128(v[4]);
  v[5] = rot12_128(v[5]);
  v[6] = rot12_128(v[6]);
  v[7] = rot12_128(v[7]);
  v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
  v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
  v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
  v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
  v[0] = add_128(v[0], v[4]);
  v[1] = add_128(v[1], v[5]);
  v[2] = add_128(v[2], v[6]);
  v[3] = add_128(v[3], v[7]);
  v[12] = xor_128(v[12], v[0]);
  v[13] = xor_128(v[13], v[1]);
  v[14] = xor_128(v[14], v[2]);
  v[15] = xor_128(v[15], v[3]);
  v[12] = rot8_128(v[12]);
  v[13] = rot8_128(v[13]);
  v[14] = rot8_128(v[14]);
  v[15] = rot8_128(v[15]);
  v[8] = add_128(v[8], v[12]);
  v[9] = add_128(v[9], v[13]);
  v[10] = add_128(v[10], v[14]);
  v[11] = add_128(v[11], v[15]);
  v[4] = xor_128(v[4], v[8]);
  v[5] = xor_128(v[5], v[9]);
  v[6] = xor_128(v[6], v[10]);
  v[7] = xor_128(v[7], v[11]);
  v[4] = rot7_128(v[4]);
  v[5] = rot7_128(v[5]);
  v[6] = rot7_128(v[6]);
  v[7] = rot7_128(v[7]);

  v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
  v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
  v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
  v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
  v[0] = add_128(v[0], v[5]);
  v[1] = add_128(v[1], v[6]);
  v[2] = add_128(v[2], v[7]);
  v[3] = add_128(v[3], v[4]);
  v[15] = xor_128(v[15], v[0]);
  v[12] = xor_128(v[12], v[1]);
  v[13] = xor_128(v[13], v[2]);
  v[14] = xor_128(v[14], v[3]);
  v[15] = rot16_128(v[15]);
  v[12] = rot16_128(v[12]);
  v[13] = rot16_128(v[13]);
  v[14] = rot16_128(v[14]);
  v[10] = add_128(v[10], v[15]);
  v[11] = add_128(v[11], v[12]);
  v[8] = add_128(v[8], v[13]);
  v[9] = add_128(v[9], v[14]);
  v[5] = xor_128(v[5], v[10]);
  v[6] = xor_128(v[6], v[11]);
  v[7] = xor_128(v[7], v[8]);
  v[4] = xor_128(v[4], v[9]);
  v[5] = rot12_128(v[5]);
  v[6] = rot12_128(v[6]);
  v[7] = rot12_128(v[7]);
  v[4] = rot12_128(v[4]);
  v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
  v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
  v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
  v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
  v[0] = add_128(v[0], v[5]);
  v[1] = add_128(v[1], v[6]);
  v[2] = add_128(v[2], v[7]);
  v[3] = add_128(v[3], v[4]);
  v[15] = xor_128(v[15], v[0]);
  v[12] = xor_128(v[12], v[1]);
  v[13] = xor_128(v[13], v[2]);
  v[14] = xor_128(v[14], v[3]);
  v[15] = rot8_128(v[15]);
  v[12] = rot8_128(v[12]);
  v[13] = rot8_128(v[13]);
  v[14] = rot8_128(v[14]);
  v[10] = add_128(v[10], v[15]);
  v[11] = add_128(v[11], v[12]);
  v[8] = add_128(v[8], v[13]);
  v[9] = add_128(v[9], v[14]);
  v[5] = xor_128(v[5], v[10]);
  v[6] = xor_128(v[6], v[11]);
  v[7] = xor_128(v[7], v[8]);
  v[4] = xor_128(v[4], v[9]);
  v[5] = rot7_128(v[5]);
  v[6] = rot7_128(v[6]);
  v[7] = rot7_128(v[7]);
  v[4] = rot7_128(v[4]);
}
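// round_fn4 runs one full BLAKE3 round over four inputs at once: each __m128i
// lane holds the same state word from a different input. The first half is
// the column step and the second half the diagonal step; since lanes never
// move between vectors, diagonalization is expressed by re-indexing v[]
// rather than by shuffling.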

INLINE void transpose_vecs_128(__m128i vecs[4]) {
  // Interleave 32-bit lanes. The low unpack is lanes 00/11 and the high is
  // 22/33. Note that this doesn't split the vector into two lanes, as the
  // AVX2 counterparts do.
  __m128i ab_01 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  __m128i ab_23 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  __m128i cd_01 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  __m128i cd_23 = _mm_unpackhi_epi32(vecs[2], vecs[3]);

  // Interleave 64-bit lanes.
  __m128i abcd_0 = _mm_unpacklo_epi64(ab_01, cd_01);
  __m128i abcd_1 = _mm_unpackhi_epi64(ab_01, cd_01);
  __m128i abcd_2 = _mm_unpacklo_epi64(ab_23, cd_23);
  __m128i abcd_3 = _mm_unpackhi_epi64(ab_23, cd_23);

  vecs[0] = abcd_0;
  vecs[1] = abcd_1;
  vecs[2] = abcd_2;
  vecs[3] = abcd_3;
}

INLINE void transpose_msg_vecs4(const uint8_t *const *inputs,
                                size_t block_offset, __m128i out[16]) {
  out[0] = loadu_128(&inputs[0][block_offset + 0 * sizeof(__m128i)]);
  out[1] = loadu_128(&inputs[1][block_offset + 0 * sizeof(__m128i)]);
  out[2] = loadu_128(&inputs[2][block_offset + 0 * sizeof(__m128i)]);
  out[3] = loadu_128(&inputs[3][block_offset + 0 * sizeof(__m128i)]);
  out[4] = loadu_128(&inputs[0][block_offset + 1 * sizeof(__m128i)]);
  out[5] = loadu_128(&inputs[1][block_offset + 1 * sizeof(__m128i)]);
  out[6] = loadu_128(&inputs[2][block_offset + 1 * sizeof(__m128i)]);
  out[7] = loadu_128(&inputs[3][block_offset + 1 * sizeof(__m128i)]);
  out[8] = loadu_128(&inputs[0][block_offset + 2 * sizeof(__m128i)]);
  out[9] = loadu_128(&inputs[1][block_offset + 2 * sizeof(__m128i)]);
  out[10] = loadu_128(&inputs[2][block_offset + 2 * sizeof(__m128i)]);
  out[11] = loadu_128(&inputs[3][block_offset + 2 * sizeof(__m128i)]);
  out[12] = loadu_128(&inputs[0][block_offset + 3 * sizeof(__m128i)]);
  out[13] = loadu_128(&inputs[1][block_offset + 3 * sizeof(__m128i)]);
  out[14] = loadu_128(&inputs[2][block_offset + 3 * sizeof(__m128i)]);
  out[15] = loadu_128(&inputs[3][block_offset + 3 * sizeof(__m128i)]);
  for (size_t i = 0; i < 4; ++i) {
    _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
  }
  transpose_vecs_128(&out[0]);
  transpose_vecs_128(&out[4]);
  transpose_vecs_128(&out[8]);
  transpose_vecs_128(&out[12]);
}

INLINE void load_counters4(uint64_t counter, bool increment_counter,
                           __m128i *out_lo, __m128i *out_hi) {
  uint64_t mask = (increment_counter ? ~0 : 0);
  __m256i mask_vec = _mm256_set1_epi64x(mask);
  __m256i deltas = _mm256_setr_epi64x(0, 1, 2, 3);
  deltas = _mm256_and_si256(mask_vec, deltas);
  __m256i counters =
      _mm256_add_epi64(_mm256_set1_epi64x((int64_t)counter), deltas);
  *out_lo = _mm256_cvtepi64_epi32(counters);
  *out_hi = _mm256_cvtepi64_epi32(_mm256_srli_epi64(counters, 32));
}
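// Counter handling: the 64-bit counter is widened into four 64-bit lanes, the
// deltas 0..3 are added (masked to zero when increment_counter is false), and
// _mm256_cvtepi64_epi32 narrows the sums back into separate low-word and
// high-word vectors, so 32-bit carries come out correctly for free.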

static
void blake3_hash4_avx512(const uint8_t *const *inputs, size_t blocks,
                         const uint32_t key[8], uint64_t counter,
                         bool increment_counter, uint8_t flags,
                         uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
  __m128i h_vecs[8] = {
      set1_128(key[0]), set1_128(key[1]), set1_128(key[2]), set1_128(key[3]),
      set1_128(key[4]), set1_128(key[5]), set1_128(key[6]), set1_128(key[7]),
  };
  __m128i counter_low_vec, counter_high_vec;
  load_counters4(counter, increment_counter, &counter_low_vec,
                 &counter_high_vec);
  uint8_t block_flags = flags | flags_start;

  for (size_t block = 0; block < blocks; block++) {
    if (block + 1 == blocks) {
      block_flags |= flags_end;
    }
    __m128i block_len_vec = set1_128(BLAKE3_BLOCK_LEN);
    __m128i block_flags_vec = set1_128(block_flags);
    __m128i msg_vecs[16];
    transpose_msg_vecs4(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);

    __m128i v[16] = {
        h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
        h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
        set1_128(IV[0]), set1_128(IV[1]), set1_128(IV[2]), set1_128(IV[3]),
        counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
    };
    round_fn4(v, msg_vecs, 0);
    round_fn4(v, msg_vecs, 1);
    round_fn4(v, msg_vecs, 2);
    round_fn4(v, msg_vecs, 3);
    round_fn4(v, msg_vecs, 4);
    round_fn4(v, msg_vecs, 5);
    round_fn4(v, msg_vecs, 6);
    h_vecs[0] = xor_128(v[0], v[8]);
    h_vecs[1] = xor_128(v[1], v[9]);
    h_vecs[2] = xor_128(v[2], v[10]);
    h_vecs[3] = xor_128(v[3], v[11]);
    h_vecs[4] = xor_128(v[4], v[12]);
    h_vecs[5] = xor_128(v[5], v[13]);
    h_vecs[6] = xor_128(v[6], v[14]);
    h_vecs[7] = xor_128(v[7], v[15]);

    block_flags = flags;
  }

  transpose_vecs_128(&h_vecs[0]);
  transpose_vecs_128(&h_vecs[4]);
  // The first four vecs now contain the first half of each output, and the
  // second four vecs contain the second half of each output.
  storeu_128(h_vecs[0], &out[0 * sizeof(__m128i)]);
  storeu_128(h_vecs[4], &out[1 * sizeof(__m128i)]);
  storeu_128(h_vecs[1], &out[2 * sizeof(__m128i)]);
  storeu_128(h_vecs[5], &out[3 * sizeof(__m128i)]);
  storeu_128(h_vecs[2], &out[4 * sizeof(__m128i)]);
  storeu_128(h_vecs[6], &out[5 * sizeof(__m128i)]);
  storeu_128(h_vecs[3], &out[6 * sizeof(__m128i)]);
  storeu_128(h_vecs[7], &out[7 * sizeof(__m128i)]);
}

static
void blake3_xof4_avx512(const uint32_t cv[8],
                        const uint8_t block[BLAKE3_BLOCK_LEN],
                        uint8_t block_len, uint64_t counter, uint8_t flags,
                        uint8_t out[4 * 64]) {
  __m128i h_vecs[8] = {
      set1_128(cv[0]), set1_128(cv[1]), set1_128(cv[2]), set1_128(cv[3]),
      set1_128(cv[4]), set1_128(cv[5]), set1_128(cv[6]), set1_128(cv[7]),
  };
  uint32_t block_words[16];
  load_block_words(block, block_words);
  __m128i msg_vecs[16];
  for (size_t i = 0; i < 16; i++) {
    msg_vecs[i] = set1_128(block_words[i]);
  }
  __m128i counter_low_vec, counter_high_vec;
  load_counters4(counter, true, &counter_low_vec, &counter_high_vec);
  __m128i block_len_vec = set1_128(block_len);
  __m128i block_flags_vec = set1_128(flags);
  __m128i v[16] = {
      h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
      h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
      set1_128(IV[0]), set1_128(IV[1]), set1_128(IV[2]), set1_128(IV[3]),
      counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
  };
  round_fn4(v, msg_vecs, 0);
  round_fn4(v, msg_vecs, 1);
  round_fn4(v, msg_vecs, 2);
  round_fn4(v, msg_vecs, 3);
  round_fn4(v, msg_vecs, 4);
  round_fn4(v, msg_vecs, 5);
  round_fn4(v, msg_vecs, 6);
  for (size_t i = 0; i < 8; i++) {
    v[i] = xor_128(v[i], v[i+8]);
    v[i+8] = xor_128(v[i+8], h_vecs[i]);
  }
  transpose_vecs_128(&v[0]);
  transpose_vecs_128(&v[4]);
  transpose_vecs_128(&v[8]);
  transpose_vecs_128(&v[12]);
  for (size_t i = 0; i < 4; i++) {
    storeu_128(v[i+ 0], &out[(4*i+0) * sizeof(__m128i)]);
    storeu_128(v[i+ 4], &out[(4*i+1) * sizeof(__m128i)]);
    storeu_128(v[i+ 8], &out[(4*i+2) * sizeof(__m128i)]);
    storeu_128(v[i+12], &out[(4*i+3) * sizeof(__m128i)]);
  }
}

/*
 * ----------------------------------------------------------------------------
 * hash8_avx512
 * ----------------------------------------------------------------------------
 */

INLINE void round_fn8(__m256i v[16], __m256i m[16], size_t r) {
  v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
  v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
  v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
  v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
  v[0] = add_256(v[0], v[4]);
  v[1] = add_256(v[1], v[5]);
  v[2] = add_256(v[2], v[6]);
  v[3] = add_256(v[3], v[7]);
  v[12] = xor_256(v[12], v[0]);
  v[13] = xor_256(v[13], v[1]);
  v[14] = xor_256(v[14], v[2]);
  v[15] = xor_256(v[15], v[3]);
  v[12] = rot16_256(v[12]);
  v[13] = rot16_256(v[13]);
  v[14] = rot16_256(v[14]);
  v[15] = rot16_256(v[15]);
  v[8] = add_256(v[8], v[12]);
  v[9] = add_256(v[9], v[13]);
  v[10] = add_256(v[10], v[14]);
  v[11] = add_256(v[11], v[15]);
  v[4] = xor_256(v[4], v[8]);
  v[5] = xor_256(v[5], v[9]);
  v[6] = xor_256(v[6], v[10]);
  v[7] = xor_256(v[7], v[11]);
  v[4] = rot12_256(v[4]);
  v[5] = rot12_256(v[5]);
  v[6] = rot12_256(v[6]);
  v[7] = rot12_256(v[7]);
  v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
  v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
  v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
  v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
  v[0] = add_256(v[0], v[4]);
  v[1] = add_256(v[1], v[5]);
  v[2] = add_256(v[2], v[6]);
  v[3] = add_256(v[3], v[7]);
  v[12] = xor_256(v[12], v[0]);
  v[13] = xor_256(v[13], v[1]);
  v[14] = xor_256(v[14], v[2]);
  v[15] = xor_256(v[15], v[3]);
  v[12] = rot8_256(v[12]);
  v[13] = rot8_256(v[13]);
  v[14] = rot8_256(v[14]);
  v[15] = rot8_256(v[15]);
  v[8] = add_256(v[8], v[12]);
  v[9] = add_256(v[9], v[13]);
  v[10] = add_256(v[10], v[14]);
  v[11] = add_256(v[11], v[15]);
  v[4] = xor_256(v[4], v[8]);
  v[5] = xor_256(v[5], v[9]);
  v[6] = xor_256(v[6], v[10]);
  v[7] = xor_256(v[7], v[11]);
  v[4] = rot7_256(v[4]);
  v[5] = rot7_256(v[5]);
  v[6] = rot7_256(v[6]);
  v[7] = rot7_256(v[7]);

  v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
  v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
  v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
  v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
  v[0] = add_256(v[0], v[5]);
  v[1] = add_256(v[1], v[6]);
  v[2] = add_256(v[2], v[7]);
  v[3] = add_256(v[3], v[4]);
  v[15] = xor_256(v[15], v[0]);
  v[12] = xor_256(v[12], v[1]);
  v[13] = xor_256(v[13], v[2]);
  v[14] = xor_256(v[14], v[3]);
  v[15] = rot16_256(v[15]);
  v[12] = rot16_256(v[12]);
  v[13] = rot16_256(v[13]);
  v[14] = rot16_256(v[14]);
  v[10] = add_256(v[10], v[15]);
  v[11] = add_256(v[11], v[12]);
  v[8] = add_256(v[8], v[13]);
  v[9] = add_256(v[9], v[14]);
  v[5] = xor_256(v[5], v[10]);
  v[6] = xor_256(v[6], v[11]);
  v[7] = xor_256(v[7], v[8]);
  v[4] = xor_256(v[4], v[9]);
  v[5] = rot12_256(v[5]);
  v[6] = rot12_256(v[6]);
  v[7] = rot12_256(v[7]);
  v[4] = rot12_256(v[4]);
  v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
  v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
  v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
  v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
  v[0] = add_256(v[0], v[5]);
  v[1] = add_256(v[1], v[6]);
  v[2] = add_256(v[2], v[7]);
  v[3] = add_256(v[3], v[4]);
  v[15] = xor_256(v[15], v[0]);
  v[12] = xor_256(v[12], v[1]);
  v[13] = xor_256(v[13], v[2]);
  v[14] = xor_256(v[14], v[3]);
  v[15] = rot8_256(v[15]);
  v[12] = rot8_256(v[12]);
  v[13] = rot8_256(v[13]);
  v[14] = rot8_256(v[14]);
  v[10] = add_256(v[10], v[15]);
  v[11] = add_256(v[11], v[12]);
  v[8] = add_256(v[8], v[13]);
  v[9] = add_256(v[9], v[14]);
  v[5] = xor_256(v[5], v[10]);
  v[6] = xor_256(v[6], v[11]);
  v[7] = xor_256(v[7], v[8]);
  v[4] = xor_256(v[4], v[9]);
  v[5] = rot7_256(v[5]);
  v[6] = rot7_256(v[6]);
  v[7] = rot7_256(v[7]);
  v[4] = rot7_256(v[4]);
}

INLINE void transpose_vecs_256(__m256i vecs[8]) {
  // Interleave 32-bit lanes. The low unpack is lanes 00/11/44/55, and the
  // high is 22/33/66/77.
  __m256i ab_0145 = _mm256_unpacklo_epi32(vecs[0], vecs[1]);
  __m256i ab_2367 = _mm256_unpackhi_epi32(vecs[0], vecs[1]);
  __m256i cd_0145 = _mm256_unpacklo_epi32(vecs[2], vecs[3]);
  __m256i cd_2367 = _mm256_unpackhi_epi32(vecs[2], vecs[3]);
  __m256i ef_0145 = _mm256_unpacklo_epi32(vecs[4], vecs[5]);
  __m256i ef_2367 = _mm256_unpackhi_epi32(vecs[4], vecs[5]);
  __m256i gh_0145 = _mm256_unpacklo_epi32(vecs[6], vecs[7]);
  __m256i gh_2367 = _mm256_unpackhi_epi32(vecs[6], vecs[7]);

  // Interleave 64-bit lanes. The low unpack is lanes 00/22 and the high is
  // 11/33.
  __m256i abcd_04 = _mm256_unpacklo_epi64(ab_0145, cd_0145);
  __m256i abcd_15 = _mm256_unpackhi_epi64(ab_0145, cd_0145);
  __m256i abcd_26 = _mm256_unpacklo_epi64(ab_2367, cd_2367);
  __m256i abcd_37 = _mm256_unpackhi_epi64(ab_2367, cd_2367);
  __m256i efgh_04 = _mm256_unpacklo_epi64(ef_0145, gh_0145);
  __m256i efgh_15 = _mm256_unpackhi_epi64(ef_0145, gh_0145);
  __m256i efgh_26 = _mm256_unpacklo_epi64(ef_2367, gh_2367);
  __m256i efgh_37 = _mm256_unpackhi_epi64(ef_2367, gh_2367);

  // Interleave 128-bit lanes.
  vecs[0] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x20);
  vecs[1] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x20);
  vecs[2] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x20);
  vecs[3] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x20);
  vecs[4] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x31);
  vecs[5] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x31);
  vecs[6] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x31);
  vecs[7] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x31);
}

INLINE void transpose_msg_vecs8(const uint8_t *const *inputs,
                                size_t block_offset, __m256i out[16]) {
  out[0] = loadu_256(&inputs[0][block_offset + 0 * sizeof(__m256i)]);
  out[1] = loadu_256(&inputs[1][block_offset + 0 * sizeof(__m256i)]);
  out[2] = loadu_256(&inputs[2][block_offset + 0 * sizeof(__m256i)]);
  out[3] = loadu_256(&inputs[3][block_offset + 0 * sizeof(__m256i)]);
  out[4] = loadu_256(&inputs[4][block_offset + 0 * sizeof(__m256i)]);
  out[5] = loadu_256(&inputs[5][block_offset + 0 * sizeof(__m256i)]);
  out[6] = loadu_256(&inputs[6][block_offset + 0 * sizeof(__m256i)]);
  out[7] = loadu_256(&inputs[7][block_offset + 0 * sizeof(__m256i)]);
  out[8] = loadu_256(&inputs[0][block_offset + 1 * sizeof(__m256i)]);
  out[9] = loadu_256(&inputs[1][block_offset + 1 * sizeof(__m256i)]);
  out[10] = loadu_256(&inputs[2][block_offset + 1 * sizeof(__m256i)]);
  out[11] = loadu_256(&inputs[3][block_offset + 1 * sizeof(__m256i)]);
  out[12] = loadu_256(&inputs[4][block_offset + 1 * sizeof(__m256i)]);
  out[13] = loadu_256(&inputs[5][block_offset + 1 * sizeof(__m256i)]);
  out[14] = loadu_256(&inputs[6][block_offset + 1 * sizeof(__m256i)]);
  out[15] = loadu_256(&inputs[7][block_offset + 1 * sizeof(__m256i)]);
  for (size_t i = 0; i < 8; ++i) {
    _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
  }
  transpose_vecs_256(&out[0]);
  transpose_vecs_256(&out[8]);
}

INLINE void load_counters8(uint64_t counter, bool increment_counter,
                           __m256i *out_lo, __m256i *out_hi) {
  uint64_t mask = (increment_counter ? ~0 : 0);
  __m512i mask_vec = _mm512_set1_epi64(mask);
  __m512i deltas = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
  deltas = _mm512_and_si512(mask_vec, deltas);
  __m512i counters =
      _mm512_add_epi64(_mm512_set1_epi64((int64_t)counter), deltas);
  *out_lo = _mm512_cvtepi64_epi32(counters);
  *out_hi = _mm512_cvtepi64_epi32(_mm512_srli_epi64(counters, 32));
}

static
void blake3_hash8_avx512(const uint8_t *const *inputs, size_t blocks,
                         const uint32_t key[8], uint64_t counter,
                         bool increment_counter, uint8_t flags,
                         uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
  __m256i h_vecs[8] = {
      set1_256(key[0]), set1_256(key[1]), set1_256(key[2]), set1_256(key[3]),
      set1_256(key[4]), set1_256(key[5]), set1_256(key[6]), set1_256(key[7]),
  };
  __m256i counter_low_vec, counter_high_vec;
  load_counters8(counter, increment_counter, &counter_low_vec,
                 &counter_high_vec);
  uint8_t block_flags = flags | flags_start;

  for (size_t block = 0; block < blocks; block++) {
    if (block + 1 == blocks) {
      block_flags |= flags_end;
    }
    __m256i block_len_vec = set1_256(BLAKE3_BLOCK_LEN);
    __m256i block_flags_vec = set1_256(block_flags);
    __m256i msg_vecs[16];
    transpose_msg_vecs8(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);

    __m256i v[16] = {
        h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
        h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
        set1_256(IV[0]), set1_256(IV[1]), set1_256(IV[2]), set1_256(IV[3]),
        counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
    };
    round_fn8(v, msg_vecs, 0);
    round_fn8(v, msg_vecs, 1);
    round_fn8(v, msg_vecs, 2);
    round_fn8(v, msg_vecs, 3);
    round_fn8(v, msg_vecs, 4);
    round_fn8(v, msg_vecs, 5);
    round_fn8(v, msg_vecs, 6);
    h_vecs[0] = xor_256(v[0], v[8]);
    h_vecs[1] = xor_256(v[1], v[9]);
    h_vecs[2] = xor_256(v[2], v[10]);
    h_vecs[3] = xor_256(v[3], v[11]);
    h_vecs[4] = xor_256(v[4], v[12]);
    h_vecs[5] = xor_256(v[5], v[13]);
    h_vecs[6] = xor_256(v[6], v[14]);
    h_vecs[7] = xor_256(v[7], v[15]);

    block_flags = flags;
  }

  transpose_vecs_256(h_vecs);
  storeu_256(h_vecs[0], &out[0 * sizeof(__m256i)]);
  storeu_256(h_vecs[1], &out[1 * sizeof(__m256i)]);
  storeu_256(h_vecs[2], &out[2 * sizeof(__m256i)]);
  storeu_256(h_vecs[3], &out[3 * sizeof(__m256i)]);
  storeu_256(h_vecs[4], &out[4 * sizeof(__m256i)]);
  storeu_256(h_vecs[5], &out[5 * sizeof(__m256i)]);
  storeu_256(h_vecs[6], &out[6 * sizeof(__m256i)]);
  storeu_256(h_vecs[7], &out[7 * sizeof(__m256i)]);
}

static
void blake3_xof8_avx512(const uint32_t cv[8],
                        const uint8_t block[BLAKE3_BLOCK_LEN],
                        uint8_t block_len, uint64_t counter, uint8_t flags,
                        uint8_t out[8 * 64]) {
  __m256i h_vecs[8] = {
      set1_256(cv[0]), set1_256(cv[1]), set1_256(cv[2]), set1_256(cv[3]),
      set1_256(cv[4]), set1_256(cv[5]), set1_256(cv[6]), set1_256(cv[7]),
  };
  uint32_t block_words[16];
  load_block_words(block, block_words);
  __m256i msg_vecs[16];
  for (size_t i = 0; i < 16; i++) {
    msg_vecs[i] = set1_256(block_words[i]);
  }
  __m256i counter_low_vec, counter_high_vec;
  load_counters8(counter, true, &counter_low_vec, &counter_high_vec);
  __m256i block_len_vec = set1_256(block_len);
  __m256i block_flags_vec = set1_256(flags);
  __m256i v[16] = {
      h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
      h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
      set1_256(IV[0]), set1_256(IV[1]), set1_256(IV[2]), set1_256(IV[3]),
      counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
  };
  round_fn8(v, msg_vecs, 0);
  round_fn8(v, msg_vecs, 1);
  round_fn8(v, msg_vecs, 2);
  round_fn8(v, msg_vecs, 3);
  round_fn8(v, msg_vecs, 4);
  round_fn8(v, msg_vecs, 5);
  round_fn8(v, msg_vecs, 6);
  for (size_t i = 0; i < 8; i++) {
    v[i] = xor_256(v[i], v[i+8]);
    v[i+8] = xor_256(v[i+8], h_vecs[i]);
  }
  transpose_vecs_256(&v[0]);
  transpose_vecs_256(&v[8]);
  for (size_t i = 0; i < 8; i++) {
    storeu_256(v[i+0], &out[(2*i+0) * sizeof(__m256i)]);
    storeu_256(v[i+8], &out[(2*i+1) * sizeof(__m256i)]);
  }
}

/*
 * ----------------------------------------------------------------------------
 * hash16_avx512
 * ----------------------------------------------------------------------------
 */

INLINE void round_fn16(__m512i v[16], __m512i m[16], size_t r) {
  v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
  v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
  v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
  v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
  v[0] = add_512(v[0], v[4]);
  v[1] = add_512(v[1], v[5]);
  v[2] = add_512(v[2], v[6]);
  v[3] = add_512(v[3], v[7]);
  v[12] = xor_512(v[12], v[0]);
  v[13] = xor_512(v[13], v[1]);
  v[14] = xor_512(v[14], v[2]);
  v[15] = xor_512(v[15], v[3]);
  v[12] = rot16_512(v[12]);
  v[13] = rot16_512(v[13]);
  v[14] = rot16_512(v[14]);
  v[15] = rot16_512(v[15]);
  v[8] = add_512(v[8], v[12]);
  v[9] = add_512(v[9], v[13]);
  v[10] = add_512(v[10], v[14]);
  v[11] = add_512(v[11], v[15]);
  v[4] = xor_512(v[4], v[8]);
  v[5] = xor_512(v[5], v[9]);
  v[6] = xor_512(v[6], v[10]);
  v[7] = xor_512(v[7], v[11]);
  v[4] = rot12_512(v[4]);
  v[5] = rot12_512(v[5]);
  v[6] = rot12_512(v[6]);
  v[7] = rot12_512(v[7]);
  v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
  v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
  v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
  v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
  v[0] = add_512(v[0], v[4]);
  v[1] = add_512(v[1], v[5]);
  v[2] = add_512(v[2], v[6]);
  v[3] = add_512(v[3], v[7]);
  v[12] = xor_512(v[12], v[0]);
  v[13] = xor_512(v[13], v[1]);
  v[14] = xor_512(v[14], v[2]);
  v[15] = xor_512(v[15], v[3]);
  v[12] = rot8_512(v[12]);
  v[13] = rot8_512(v[13]);
  v[14] = rot8_512(v[14]);
  v[15] = rot8_512(v[15]);
  v[8] = add_512(v[8], v[12]);
  v[9] = add_512(v[9], v[13]);
  v[10] = add_512(v[10], v[14]);
  v[11] = add_512(v[11], v[15]);
  v[4] = xor_512(v[4], v[8]);
  v[5] = xor_512(v[5], v[9]);
  v[6] = xor_512(v[6], v[10]);
  v[7] = xor_512(v[7], v[11]);
  v[4] = rot7_512(v[4]);
  v[5] = rot7_512(v[5]);
  v[6] = rot7_512(v[6]);
  v[7] = rot7_512(v[7]);

  v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
  v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
  v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
  v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
  v[0] = add_512(v[0], v[5]);
  v[1] = add_512(v[1], v[6]);
  v[2] = add_512(v[2], v[7]);
  v[3] = add_512(v[3], v[4]);
  v[15] = xor_512(v[15], v[0]);
  v[12] = xor_512(v[12], v[1]);
  v[13] = xor_512(v[13], v[2]);
  v[14] = xor_512(v[14], v[3]);
  v[15] = rot16_512(v[15]);
  v[12] = rot16_512(v[12]);
  v[13] = rot16_512(v[13]);
  v[14] = rot16_512(v[14]);
  v[10] = add_512(v[10], v[15]);
  v[11] = add_512(v[11], v[12]);
  v[8] = add_512(v[8], v[13]);
  v[9] = add_512(v[9], v[14]);
  v[5] = xor_512(v[5], v[10]);
  v[6] = xor_512(v[6], v[11]);
  v[7] = xor_512(v[7], v[8]);
  v[4] = xor_512(v[4], v[9]);
  v[5] = rot12_512(v[5]);
  v[6] = rot12_512(v[6]);
  v[7] = rot12_512(v[7]);
  v[4] = rot12_512(v[4]);
  v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
  v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
  v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
  v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
  v[0] = add_512(v[0], v[5]);
  v[1] = add_512(v[1], v[6]);
  v[2] = add_512(v[2], v[7]);
  v[3] = add_512(v[3], v[4]);
  v[15] = xor_512(v[15], v[0]);
  v[12] = xor_512(v[12], v[1]);
  v[13] = xor_512(v[13], v[2]);
  v[14] = xor_512(v[14], v[3]);
  v[15] = rot8_512(v[15]);
  v[12] = rot8_512(v[12]);
  v[13] = rot8_512(v[13]);
  v[14] = rot8_512(v[14]);
  v[10] = add_512(v[10], v[15]);
  v[11] = add_512(v[11], v[12]);
  v[8] = add_512(v[8], v[13]);
  v[9] = add_512(v[9], v[14]);
  v[5] = xor_512(v[5], v[10]);
  v[6] = xor_512(v[6], v[11]);
  v[7] = xor_512(v[7], v[8]);
  v[4] = xor_512(v[4], v[9]);
  v[5] = rot7_512(v[5]);
  v[6] = rot7_512(v[6]);
  v[7] = rot7_512(v[7]);
  v[4] = rot7_512(v[4]);
}

// 0b10001000, or lanes a0/a2/b0/b2 in little-endian order
#define LO_IMM8 0x88

INLINE __m512i unpack_lo_128(__m512i a, __m512i b) {
  return _mm512_shuffle_i32x4(a, b, LO_IMM8);
}

// 0b11011101, or lanes a1/a3/b1/b3 in little-endian order
#define HI_IMM8 0xdd

INLINE __m512i unpack_hi_128(__m512i a, __m512i b) {
  return _mm512_shuffle_i32x4(a, b, HI_IMM8);
}

INLINE void transpose_vecs_512(__m512i vecs[16]) {
  // Interleave 32-bit lanes. The _0 unpacks hold the even-numbered words of
  // each pair of rows, and the _2 unpacks hold the odd-numbered words.
  __m512i ab_0 = _mm512_unpacklo_epi32(vecs[0], vecs[1]);
  __m512i ab_2 = _mm512_unpackhi_epi32(vecs[0], vecs[1]);
  __m512i cd_0 = _mm512_unpacklo_epi32(vecs[2], vecs[3]);
  __m512i cd_2 = _mm512_unpackhi_epi32(vecs[2], vecs[3]);
  __m512i ef_0 = _mm512_unpacklo_epi32(vecs[4], vecs[5]);
  __m512i ef_2 = _mm512_unpackhi_epi32(vecs[4], vecs[5]);
  __m512i gh_0 = _mm512_unpacklo_epi32(vecs[6], vecs[7]);
  __m512i gh_2 = _mm512_unpackhi_epi32(vecs[6], vecs[7]);
  __m512i ij_0 = _mm512_unpacklo_epi32(vecs[8], vecs[9]);
  __m512i ij_2 = _mm512_unpackhi_epi32(vecs[8], vecs[9]);
  __m512i kl_0 = _mm512_unpacklo_epi32(vecs[10], vecs[11]);
  __m512i kl_2 = _mm512_unpackhi_epi32(vecs[10], vecs[11]);
  __m512i mn_0 = _mm512_unpacklo_epi32(vecs[12], vecs[13]);
  __m512i mn_2 = _mm512_unpackhi_epi32(vecs[12], vecs[13]);
  __m512i op_0 = _mm512_unpacklo_epi32(vecs[14], vecs[15]);
  __m512i op_2 = _mm512_unpackhi_epi32(vecs[14], vecs[15]);

  // Interleave 64-bit lanes.
  __m512i abcd_0 = _mm512_unpacklo_epi64(ab_0, cd_0);
  __m512i abcd_1 = _mm512_unpackhi_epi64(ab_0, cd_0);
  __m512i abcd_2 = _mm512_unpacklo_epi64(ab_2, cd_2);
  __m512i abcd_3 = _mm512_unpackhi_epi64(ab_2, cd_2);
  __m512i efgh_0 = _mm512_unpacklo_epi64(ef_0, gh_0);
  __m512i efgh_1 = _mm512_unpackhi_epi64(ef_0, gh_0);
  __m512i efgh_2 = _mm512_unpacklo_epi64(ef_2, gh_2);
  __m512i efgh_3 = _mm512_unpackhi_epi64(ef_2, gh_2);
  __m512i ijkl_0 = _mm512_unpacklo_epi64(ij_0, kl_0);
  __m512i ijkl_1 = _mm512_unpackhi_epi64(ij_0, kl_0);
  __m512i ijkl_2 = _mm512_unpacklo_epi64(ij_2, kl_2);
  __m512i ijkl_3 = _mm512_unpackhi_epi64(ij_2, kl_2);
  __m512i mnop_0 = _mm512_unpacklo_epi64(mn_0, op_0);
  __m512i mnop_1 = _mm512_unpackhi_epi64(mn_0, op_0);
  __m512i mnop_2 = _mm512_unpacklo_epi64(mn_2, op_2);
  __m512i mnop_3 = _mm512_unpackhi_epi64(mn_2, op_2);

  // Interleave 128-bit lanes.
  __m512i abcdefgh_0 = unpack_lo_128(abcd_0, efgh_0);
  __m512i abcdefgh_1 = unpack_lo_128(abcd_1, efgh_1);
  __m512i abcdefgh_2 = unpack_lo_128(abcd_2, efgh_2);
  __m512i abcdefgh_3 = unpack_lo_128(abcd_3, efgh_3);
  __m512i abcdefgh_4 = unpack_hi_128(abcd_0, efgh_0);
  __m512i abcdefgh_5 = unpack_hi_128(abcd_1, efgh_1);
  __m512i abcdefgh_6 = unpack_hi_128(abcd_2, efgh_2);
  __m512i abcdefgh_7 = unpack_hi_128(abcd_3, efgh_3);
  __m512i ijklmnop_0 = unpack_lo_128(ijkl_0, mnop_0);
  __m512i ijklmnop_1 = unpack_lo_128(ijkl_1, mnop_1);
  __m512i ijklmnop_2 = unpack_lo_128(ijkl_2, mnop_2);
  __m512i ijklmnop_3 = unpack_lo_128(ijkl_3, mnop_3);
  __m512i ijklmnop_4 = unpack_hi_128(ijkl_0, mnop_0);
  __m512i ijklmnop_5 = unpack_hi_128(ijkl_1, mnop_1);
  __m512i ijklmnop_6 = unpack_hi_128(ijkl_2, mnop_2);
  __m512i ijklmnop_7 = unpack_hi_128(ijkl_3, mnop_3);

  // Interleave 256-bit lanes.
  vecs[0] = unpack_lo_128(abcdefgh_0, ijklmnop_0);
  vecs[1] = unpack_lo_128(abcdefgh_1, ijklmnop_1);
  vecs[2] = unpack_lo_128(abcdefgh_2, ijklmnop_2);
  vecs[3] = unpack_lo_128(abcdefgh_3, ijklmnop_3);
  vecs[4] = unpack_lo_128(abcdefgh_4, ijklmnop_4);
  vecs[5] = unpack_lo_128(abcdefgh_5, ijklmnop_5);
  vecs[6] = unpack_lo_128(abcdefgh_6, ijklmnop_6);
  vecs[7] = unpack_lo_128(abcdefgh_7, ijklmnop_7);
  vecs[8] = unpack_hi_128(abcdefgh_0, ijklmnop_0);
  vecs[9] = unpack_hi_128(abcdefgh_1, ijklmnop_1);
  vecs[10] = unpack_hi_128(abcdefgh_2, ijklmnop_2);
  vecs[11] = unpack_hi_128(abcdefgh_3, ijklmnop_3);
  vecs[12] = unpack_hi_128(abcdefgh_4, ijklmnop_4);
  vecs[13] = unpack_hi_128(abcdefgh_5, ijklmnop_5);
  vecs[14] = unpack_hi_128(abcdefgh_6, ijklmnop_6);
  vecs[15] = unpack_hi_128(abcdefgh_7, ijklmnop_7);
}
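// transpose_vecs_512 is a full 16x16 transpose of 32-bit words, built from
// four interleaving passes at successively wider granularity (32-bit, 64-bit,
// 128-bit, 256-bit). It turns 16 row-wise message blocks into the word-wise
// layout that round_fn16 consumes.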

INLINE void transpose_msg_vecs16(const uint8_t *const *inputs,
                                 size_t block_offset, __m512i out[16]) {
  out[0] = loadu_512(&inputs[0][block_offset]);
  out[1] = loadu_512(&inputs[1][block_offset]);
  out[2] = loadu_512(&inputs[2][block_offset]);
  out[3] = loadu_512(&inputs[3][block_offset]);
  out[4] = loadu_512(&inputs[4][block_offset]);
  out[5] = loadu_512(&inputs[5][block_offset]);
  out[6] = loadu_512(&inputs[6][block_offset]);
  out[7] = loadu_512(&inputs[7][block_offset]);
  out[8] = loadu_512(&inputs[8][block_offset]);
  out[9] = loadu_512(&inputs[9][block_offset]);
  out[10] = loadu_512(&inputs[10][block_offset]);
  out[11] = loadu_512(&inputs[11][block_offset]);
  out[12] = loadu_512(&inputs[12][block_offset]);
  out[13] = loadu_512(&inputs[13][block_offset]);
  out[14] = loadu_512(&inputs[14][block_offset]);
  out[15] = loadu_512(&inputs[15][block_offset]);
  for (size_t i = 0; i < 16; ++i) {
    _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
  }
  transpose_vecs_512(out);
}

INLINE void load_counters16(uint64_t counter, bool increment_counter,
                            __m512i *out_lo, __m512i *out_hi) {
  const __m512i mask = _mm512_set1_epi32(-(int32_t)increment_counter);
  const __m512i deltas = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  const __m512i masked_deltas = _mm512_and_si512(deltas, mask);
  const __m512i low_words = _mm512_add_epi32(
    _mm512_set1_epi32((int32_t)counter),
    masked_deltas);
  // The carry bit is 1 if the high bit of the word was 1 before addition and
  // is 0 after.
  // NOTE: It would be a bit more natural to use _mm512_cmp_epu32_mask to
  // compute the carry bits here, and originally we did, but that intrinsic is
  // broken under GCC 5.4. See https://github.com/BLAKE3-team/BLAKE3/issues/271.
  const __m512i carries = _mm512_srli_epi32(
    _mm512_andnot_si512(
        low_words,
        _mm512_set1_epi32((int32_t)counter)),
    31);
  const __m512i high_words = _mm512_add_epi32(
    _mm512_set1_epi32((int32_t)(counter >> 32)),
    carries);
  *out_lo = low_words;
  *out_hi = high_words;
}
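// Scalar sketch of the carry trick above, for one lane (illustrative only,
// not part of the library). For a small delta, a carry out of the low word
// happened exactly when the old low word's top bit was 1 and the new one's
// is 0:
//
//   uint32_t lo    = (uint32_t)counter + delta;
//   uint32_t carry = (~lo & (uint32_t)counter) >> 31;
//   uint32_t hi    = (uint32_t)(counter >> 32) + carry;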

static
void blake3_hash16_avx512(const uint8_t *const *inputs, size_t blocks,
                          const uint32_t key[8], uint64_t counter,
                          bool increment_counter, uint8_t flags,
                          uint8_t flags_start, uint8_t flags_end,
                          uint8_t *out) {
  __m512i h_vecs[8] = {
      set1_512(key[0]), set1_512(key[1]), set1_512(key[2]), set1_512(key[3]),
      set1_512(key[4]), set1_512(key[5]), set1_512(key[6]), set1_512(key[7]),
  };
  __m512i counter_low_vec, counter_high_vec;
  load_counters16(counter, increment_counter, &counter_low_vec,
                  &counter_high_vec);
  uint8_t block_flags = flags | flags_start;

  for (size_t block = 0; block < blocks; block++) {
    if (block + 1 == blocks) {
      block_flags |= flags_end;
    }
    __m512i block_len_vec = set1_512(BLAKE3_BLOCK_LEN);
    __m512i block_flags_vec = set1_512(block_flags);
    __m512i msg_vecs[16];
    transpose_msg_vecs16(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);

    __m512i v[16] = {
        h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
        h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
        set1_512(IV[0]), set1_512(IV[1]), set1_512(IV[2]), set1_512(IV[3]),
        counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
    };
    round_fn16(v, msg_vecs, 0);
    round_fn16(v, msg_vecs, 1);
    round_fn16(v, msg_vecs, 2);
    round_fn16(v, msg_vecs, 3);
    round_fn16(v, msg_vecs, 4);
    round_fn16(v, msg_vecs, 5);
    round_fn16(v, msg_vecs, 6);
    h_vecs[0] = xor_512(v[0], v[8]);
    h_vecs[1] = xor_512(v[1], v[9]);
    h_vecs[2] = xor_512(v[2], v[10]);
    h_vecs[3] = xor_512(v[3], v[11]);
    h_vecs[4] = xor_512(v[4], v[12]);
    h_vecs[5] = xor_512(v[5], v[13]);
    h_vecs[6] = xor_512(v[6], v[14]);
    h_vecs[7] = xor_512(v[7], v[15]);

    block_flags = flags;
  }

  // transpose_vecs_512 operates on a 16x16 matrix of words, but we only have
  // 8 state vectors. Pad the matrix with zeros. After transposition, store
  // the lower half of each vector.
  __m512i padded[16] = {
      h_vecs[0],   h_vecs[1],   h_vecs[2],   h_vecs[3],
      h_vecs[4],   h_vecs[5],   h_vecs[6],   h_vecs[7],
      set1_512(0), set1_512(0), set1_512(0), set1_512(0),
      set1_512(0), set1_512(0), set1_512(0), set1_512(0),
  };
  transpose_vecs_512(padded);
  _mm256_mask_storeu_epi32(&out[0 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[0]));
  _mm256_mask_storeu_epi32(&out[1 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[1]));
  _mm256_mask_storeu_epi32(&out[2 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[2]));
  _mm256_mask_storeu_epi32(&out[3 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[3]));
  _mm256_mask_storeu_epi32(&out[4 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[4]));
  _mm256_mask_storeu_epi32(&out[5 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[5]));
  _mm256_mask_storeu_epi32(&out[6 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[6]));
  _mm256_mask_storeu_epi32(&out[7 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[7]));
  _mm256_mask_storeu_epi32(&out[8 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[8]));
  _mm256_mask_storeu_epi32(&out[9 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[9]));
  _mm256_mask_storeu_epi32(&out[10 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[10]));
  _mm256_mask_storeu_epi32(&out[11 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[11]));
  _mm256_mask_storeu_epi32(&out[12 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[12]));
  _mm256_mask_storeu_epi32(&out[13 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[13]));
  _mm256_mask_storeu_epi32(&out[14 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[14]));
  _mm256_mask_storeu_epi32(&out[15 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[15]));
}

static
void blake3_xof16_avx512(const uint32_t cv[8],
                         const uint8_t block[BLAKE3_BLOCK_LEN],
                         uint8_t block_len, uint64_t counter, uint8_t flags,
                         uint8_t out[16 * 64]) {
  __m512i h_vecs[8] = {
      set1_512(cv[0]), set1_512(cv[1]), set1_512(cv[2]), set1_512(cv[3]),
      set1_512(cv[4]), set1_512(cv[5]), set1_512(cv[6]), set1_512(cv[7]),
  };
  uint32_t block_words[16];
  load_block_words(block, block_words);
  __m512i msg_vecs[16];
  for (size_t i = 0; i < 16; i++) {
    msg_vecs[i] = set1_512(block_words[i]);
  }
  __m512i counter_low_vec, counter_high_vec;
  load_counters16(counter, true, &counter_low_vec, &counter_high_vec);
  __m512i block_len_vec = set1_512(block_len);
  __m512i block_flags_vec = set1_512(flags);
  __m512i v[16] = {
      h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
      h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
      set1_512(IV[0]), set1_512(IV[1]), set1_512(IV[2]), set1_512(IV[3]),
      counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
  };
  round_fn16(v, msg_vecs, 0);
  round_fn16(v, msg_vecs, 1);
  round_fn16(v, msg_vecs, 2);
  round_fn16(v, msg_vecs, 3);
  round_fn16(v, msg_vecs, 4);
  round_fn16(v, msg_vecs, 5);
  round_fn16(v, msg_vecs, 6);
  for (size_t i = 0; i < 8; i++) {
    v[i] = xor_512(v[i], v[i+8]);
    v[i+8] = xor_512(v[i+8], h_vecs[i]);
  }
  transpose_vecs_512(v);
  for (size_t i = 0; i < 16; i++) {
    storeu_512(v[i], &out[i * sizeof(__m512i)]);
  }
}

/*
 * ----------------------------------------------------------------------------
 * hash_many_avx512
 * ----------------------------------------------------------------------------
 */

INLINE void hash_one_avx512(const uint8_t *input, size_t blocks,
                            const uint32_t key[8], uint64_t counter,
                            uint8_t flags, uint8_t flags_start,
                            uint8_t flags_end, uint8_t out[BLAKE3_OUT_LEN]) {
  uint32_t cv[8];
  memcpy(cv, key, BLAKE3_KEY_LEN);
  uint8_t block_flags = flags | flags_start;
  while (blocks > 0) {
    if (blocks == 1) {
      block_flags |= flags_end;
    }
    blake3_compress_in_place_avx512(cv, input, BLAKE3_BLOCK_LEN, counter,
                                    block_flags);
    input = &input[BLAKE3_BLOCK_LEN];
    blocks -= 1;
    block_flags = flags;
  }
  memcpy(out, cv, BLAKE3_OUT_LEN);
}

void blake3_hash_many_avx512(const uint8_t *const *inputs, size_t num_inputs,
                             size_t blocks, const uint32_t key[8],
                             uint64_t counter, bool increment_counter,
                             uint8_t flags, uint8_t flags_start,
                             uint8_t flags_end, uint8_t *out) {
  while (num_inputs >= 16) {
    blake3_hash16_avx512(inputs, blocks, key, counter, increment_counter,
                         flags, flags_start, flags_end, out);
    if (increment_counter) {
      counter += 16;
    }
    inputs += 16;
    num_inputs -= 16;
    out = &out[16 * BLAKE3_OUT_LEN];
  }
  while (num_inputs >= 8) {
    blake3_hash8_avx512(inputs, blocks, key, counter, increment_counter,
                        flags, flags_start, flags_end, out);
    if (increment_counter) {
      counter += 8;
    }
    inputs += 8;
    num_inputs -= 8;
    out = &out[8 * BLAKE3_OUT_LEN];
  }
  while (num_inputs >= 4) {
    blake3_hash4_avx512(inputs, blocks, key, counter, increment_counter,
                        flags, flags_start, flags_end, out);
    if (increment_counter) {
      counter += 4;
    }
    inputs += 4;
    num_inputs -= 4;
    out = &out[4 * BLAKE3_OUT_LEN];
  }
  while (num_inputs > 0) {
    hash_one_avx512(inputs[0], blocks, key, counter, flags, flags_start,
                    flags_end, out);
    if (increment_counter) {
      counter += 1;
    }
    inputs += 1;
    num_inputs -= 1;
    out = &out[BLAKE3_OUT_LEN];
  }
}
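// blake3_hash_many_avx512 peels off batches of 16, 8, and 4 inputs before
// falling back to one-at-a-time compression, so throughput degrades
// gracefully when the input count is not a multiple of 16. (In LLVM's
// vendored copy, the blake3_*_avx512 names are macros that add an llvm_
// prefix; see llvm_blake3_prefix.h.)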

void blake3_xof_many_avx512(const uint32_t cv[8],
                            const uint8_t block[BLAKE3_BLOCK_LEN],
                            uint8_t block_len, uint64_t counter, uint8_t flags,
                            uint8_t* out, size_t outblocks) {
  while (outblocks >= 16) {
    blake3_xof16_avx512(cv, block, block_len, counter, flags, out);
    counter += 16;
    outblocks -= 16;
    out += 16 * BLAKE3_BLOCK_LEN;
  }
  while (outblocks >= 8) {
    blake3_xof8_avx512(cv, block, block_len, counter, flags, out);
    counter += 8;
    outblocks -= 8;
    out += 8 * BLAKE3_BLOCK_LEN;
  }
  while (outblocks >= 4) {
    blake3_xof4_avx512(cv, block, block_len, counter, flags, out);
    counter += 4;
    outblocks -= 4;
    out += 4 * BLAKE3_BLOCK_LEN;
  }
  while (outblocks > 0) {
    blake3_compress_xof_avx512(cv, block, block_len, counter, flags, out);
    counter += 1;
    outblocks -= 1;
    out += BLAKE3_BLOCK_LEN;
  }
}
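// blake3_xof_many_avx512 produces extended output by re-running the same
// compression with an incrementing counter, one 64-byte block per counter
// value; that counter-indexed structure is what makes BLAKE3's XOF seekable.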
