Path: blob/main/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512.c
#include "blake3_impl.h"12#include <immintrin.h>34#define _mm_shuffle_ps2(a, b, c) \5(_mm_castps_si128( \6_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), (c))))78INLINE __m128i loadu_128(const uint8_t src[16]) {9return _mm_loadu_si128((const __m128i *)src);10}1112INLINE __m256i loadu_256(const uint8_t src[32]) {13return _mm256_loadu_si256((const __m256i *)src);14}1516INLINE __m512i loadu_512(const uint8_t src[64]) {17return _mm512_loadu_si512((const __m512i *)src);18}1920INLINE void storeu_128(__m128i src, uint8_t dest[16]) {21_mm_storeu_si128((__m128i *)dest, src);22}2324INLINE void storeu_256(__m256i src, uint8_t dest[16]) {25_mm256_storeu_si256((__m256i *)dest, src);26}2728INLINE __m128i add_128(__m128i a, __m128i b) { return _mm_add_epi32(a, b); }2930INLINE __m256i add_256(__m256i a, __m256i b) { return _mm256_add_epi32(a, b); }3132INLINE __m512i add_512(__m512i a, __m512i b) { return _mm512_add_epi32(a, b); }3334INLINE __m128i xor_128(__m128i a, __m128i b) { return _mm_xor_si128(a, b); }3536INLINE __m256i xor_256(__m256i a, __m256i b) { return _mm256_xor_si256(a, b); }3738INLINE __m512i xor_512(__m512i a, __m512i b) { return _mm512_xor_si512(a, b); }3940INLINE __m128i set1_128(uint32_t x) { return _mm_set1_epi32((int32_t)x); }4142INLINE __m256i set1_256(uint32_t x) { return _mm256_set1_epi32((int32_t)x); }4344INLINE __m512i set1_512(uint32_t x) { return _mm512_set1_epi32((int32_t)x); }4546INLINE __m128i set4(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {47return _mm_setr_epi32((int32_t)a, (int32_t)b, (int32_t)c, (int32_t)d);48}4950INLINE __m128i rot16_128(__m128i x) { return _mm_ror_epi32(x, 16); }5152INLINE __m256i rot16_256(__m256i x) { return _mm256_ror_epi32(x, 16); }5354INLINE __m512i rot16_512(__m512i x) { return _mm512_ror_epi32(x, 16); }5556INLINE __m128i rot12_128(__m128i x) { return _mm_ror_epi32(x, 12); }5758INLINE __m256i rot12_256(__m256i x) { return _mm256_ror_epi32(x, 12); }5960INLINE __m512i rot12_512(__m512i x) { return _mm512_ror_epi32(x, 12); }6162INLINE __m128i rot8_128(__m128i x) { return _mm_ror_epi32(x, 8); }6364INLINE __m256i rot8_256(__m256i x) { return _mm256_ror_epi32(x, 8); }6566INLINE __m512i rot8_512(__m512i x) { return _mm512_ror_epi32(x, 8); }6768INLINE __m128i rot7_128(__m128i x) { return _mm_ror_epi32(x, 7); }6970INLINE __m256i rot7_256(__m256i x) { return _mm256_ror_epi32(x, 7); }7172INLINE __m512i rot7_512(__m512i x) { return _mm512_ror_epi32(x, 7); }7374/*75* ----------------------------------------------------------------------------76* compress_avx51277* ----------------------------------------------------------------------------78*/7980INLINE void g1(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,81__m128i m) {82*row0 = add_128(add_128(*row0, m), *row1);83*row3 = xor_128(*row3, *row0);84*row3 = rot16_128(*row3);85*row2 = add_128(*row2, *row3);86*row1 = xor_128(*row1, *row2);87*row1 = rot12_128(*row1);88}8990INLINE void g2(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,91__m128i m) {92*row0 = add_128(add_128(*row0, m), *row1);93*row3 = xor_128(*row3, *row0);94*row3 = rot8_128(*row3);95*row2 = add_128(*row2, *row3);96*row1 = xor_128(*row1, *row2);97*row1 = rot7_128(*row1);98}99100// Note the optimization here of leaving row1 as the unrotated row, rather than101// row0. All the message loads below are adjusted to compensate for this. 
INLINE void g1(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
               __m128i m) {
  *row0 = add_128(add_128(*row0, m), *row1);
  *row3 = xor_128(*row3, *row0);
  *row3 = rot16_128(*row3);
  *row2 = add_128(*row2, *row3);
  *row1 = xor_128(*row1, *row2);
  *row1 = rot12_128(*row1);
}

INLINE void g2(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
               __m128i m) {
  *row0 = add_128(add_128(*row0, m), *row1);
  *row3 = xor_128(*row3, *row0);
  *row3 = rot8_128(*row3);
  *row2 = add_128(*row2, *row3);
  *row1 = xor_128(*row1, *row2);
  *row1 = rot7_128(*row1);
}

// Note the optimization here of leaving row1 as the unrotated row, rather than
// row0. All the message loads below are adjusted to compensate for this. See
// discussion at https://github.com/sneves/blake2-avx2/pull/4
INLINE void diagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
  *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(2, 1, 0, 3));
  *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
  *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(0, 3, 2, 1));
}

INLINE void undiagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
  *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(0, 3, 2, 1));
  *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
  *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(2, 1, 0, 3));
}

INLINE void compress_pre(__m128i rows[4], const uint32_t cv[8],
                         const uint8_t block[BLAKE3_BLOCK_LEN],
                         uint8_t block_len, uint64_t counter, uint8_t flags) {
  rows[0] = loadu_128((uint8_t *)&cv[0]);
  rows[1] = loadu_128((uint8_t *)&cv[4]);
  rows[2] = set4(IV[0], IV[1], IV[2], IV[3]);
  rows[3] = set4(counter_low(counter), counter_high(counter),
                 (uint32_t)block_len, (uint32_t)flags);

  __m128i m0 = loadu_128(&block[sizeof(__m128i) * 0]);
  __m128i m1 = loadu_128(&block[sizeof(__m128i) * 1]);
  __m128i m2 = loadu_128(&block[sizeof(__m128i) * 2]);
  __m128i m3 = loadu_128(&block[sizeof(__m128i) * 3]);

  __m128i t0, t1, t2, t3, tt;

  // Round 1. The first round permutes the message words from the original
  // input order, into the groups that get mixed in parallel.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(2, 0, 2, 0)); //  6  4  2  0
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 3, 1)); //  7  5  3  1
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(2, 0, 2, 0)); // 14 12 10  8
  t2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2, 1, 0, 3));   // 12 10  8 14
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 1, 3, 1)); // 15 13 11  9
  t3 = _mm_shuffle_epi32(t3, _MM_SHUFFLE(2, 1, 0, 3));   // 13 11  9 15
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 2. This round and all following rounds apply a fixed permutation
  // to the message words from the round before.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 3
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 4
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 5
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 6
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 7
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
}

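// The XOF variant produces 64 bytes of output: the first 32 bytes are the same
// XOR of the two row pairs that the in-place compression writes back as the
// new chaining value, and the second 32 bytes XOR rows 2 and 3 with the
// original chaining value.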
void blake3_compress_xof_avx512(const uint32_t cv[8],
                                const uint8_t block[BLAKE3_BLOCK_LEN],
                                uint8_t block_len, uint64_t counter,
                                uint8_t flags, uint8_t out[64]) {
  __m128i rows[4];
  compress_pre(rows, cv, block, block_len, counter, flags);
  storeu_128(xor_128(rows[0], rows[2]), &out[0]);
  storeu_128(xor_128(rows[1], rows[3]), &out[16]);
  storeu_128(xor_128(rows[2], loadu_128((uint8_t *)&cv[0])), &out[32]);
  storeu_128(xor_128(rows[3], loadu_128((uint8_t *)&cv[4])), &out[48]);
}

void blake3_compress_in_place_avx512(uint32_t cv[8],
                                     const uint8_t block[BLAKE3_BLOCK_LEN],
                                     uint8_t block_len, uint64_t counter,
                                     uint8_t flags) {
  __m128i rows[4];
  compress_pre(rows, cv, block, block_len, counter, flags);
  storeu_128(xor_128(rows[0], rows[2]), (uint8_t *)&cv[0]);
  storeu_128(xor_128(rows[1], rows[3]), (uint8_t *)&cv[4]);
}

/*
 * ----------------------------------------------------------------------------
 * hash4_avx512
 * ----------------------------------------------------------------------------
 */

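// In the hash4 code path the state is kept in 16 __m128i vectors, one per
// state word, with each 32-bit lane belonging to a different input. round_fn4
// therefore runs one full BLAKE3 round for four independent inputs at a time.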
INLINE void round_fn4(__m128i v[16], __m128i m[16], size_t r) {
  v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
  v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
  v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
  v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
  v[0] = add_128(v[0], v[4]);
  v[1] = add_128(v[1], v[5]);
  v[2] = add_128(v[2], v[6]);
  v[3] = add_128(v[3], v[7]);
  v[12] = xor_128(v[12], v[0]);
  v[13] = xor_128(v[13], v[1]);
  v[14] = xor_128(v[14], v[2]);
  v[15] = xor_128(v[15], v[3]);
  v[12] = rot16_128(v[12]);
  v[13] = rot16_128(v[13]);
  v[14] = rot16_128(v[14]);
  v[15] = rot16_128(v[15]);
  v[8] = add_128(v[8], v[12]);
  v[9] = add_128(v[9], v[13]);
  v[10] = add_128(v[10], v[14]);
  v[11] = add_128(v[11], v[15]);
  v[4] = xor_128(v[4], v[8]);
  v[5] = xor_128(v[5], v[9]);
  v[6] = xor_128(v[6], v[10]);
  v[7] = xor_128(v[7], v[11]);
  v[4] = rot12_128(v[4]);
  v[5] = rot12_128(v[5]);
  v[6] = rot12_128(v[6]);
  v[7] = rot12_128(v[7]);
  v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
  v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
  v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
  v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
  v[0] = add_128(v[0], v[4]);
  v[1] = add_128(v[1], v[5]);
  v[2] = add_128(v[2], v[6]);
  v[3] = add_128(v[3], v[7]);
  v[12] = xor_128(v[12], v[0]);
  v[13] = xor_128(v[13], v[1]);
  v[14] = xor_128(v[14], v[2]);
  v[15] = xor_128(v[15], v[3]);
  v[12] = rot8_128(v[12]);
  v[13] = rot8_128(v[13]);
  v[14] = rot8_128(v[14]);
  v[15] = rot8_128(v[15]);
  v[8] = add_128(v[8], v[12]);
  v[9] = add_128(v[9], v[13]);
  v[10] = add_128(v[10], v[14]);
  v[11] = add_128(v[11], v[15]);
  v[4] = xor_128(v[4], v[8]);
  v[5] = xor_128(v[5], v[9]);
  v[6] = xor_128(v[6], v[10]);
  v[7] = xor_128(v[7], v[11]);
  v[4] = rot7_128(v[4]);
  v[5] = rot7_128(v[5]);
  v[6] = rot7_128(v[6]);
  v[7] = rot7_128(v[7]);

  v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
  v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
  v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
  v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
  v[0] = add_128(v[0], v[5]);
  v[1] = add_128(v[1], v[6]);
  v[2] = add_128(v[2], v[7]);
  v[3] = add_128(v[3], v[4]);
  v[15] = xor_128(v[15], v[0]);
  v[12] = xor_128(v[12], v[1]);
  v[13] = xor_128(v[13], v[2]);
  v[14] = xor_128(v[14], v[3]);
  v[15] = rot16_128(v[15]);
  v[12] = rot16_128(v[12]);
  v[13] = rot16_128(v[13]);
  v[14] = rot16_128(v[14]);
  v[10] = add_128(v[10], v[15]);
  v[11] = add_128(v[11], v[12]);
  v[8] = add_128(v[8], v[13]);
  v[9] = add_128(v[9], v[14]);
  v[5] = xor_128(v[5], v[10]);
  v[6] = xor_128(v[6], v[11]);
  v[7] = xor_128(v[7], v[8]);
  v[4] = xor_128(v[4], v[9]);
  v[5] = rot12_128(v[5]);
  v[6] = rot12_128(v[6]);
  v[7] = rot12_128(v[7]);
  v[4] = rot12_128(v[4]);
  v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
  v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
  v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
  v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
  v[0] = add_128(v[0], v[5]);
  v[1] = add_128(v[1], v[6]);
  v[2] = add_128(v[2], v[7]);
  v[3] = add_128(v[3], v[4]);
  v[15] = xor_128(v[15], v[0]);
  v[12] = xor_128(v[12], v[1]);
  v[13] = xor_128(v[13], v[2]);
  v[14] = xor_128(v[14], v[3]);
  v[15] = rot8_128(v[15]);
  v[12] = rot8_128(v[12]);
  v[13] = rot8_128(v[13]);
  v[14] = rot8_128(v[14]);
  v[10] = add_128(v[10], v[15]);
  v[11] = add_128(v[11], v[12]);
  v[8] = add_128(v[8], v[13]);
  v[9] = add_128(v[9], v[14]);
  v[5] = xor_128(v[5], v[10]);
  v[6] = xor_128(v[6], v[11]);
  v[7] = xor_128(v[7], v[8]);
  v[4] = xor_128(v[4], v[9]);
  v[5] = rot7_128(v[5]);
  v[6] = rot7_128(v[6]);
  v[7] = rot7_128(v[7]);
  v[4] = rot7_128(v[4]);
}

INLINE void transpose_vecs_128(__m128i vecs[4]) {
  // Interleave 32-bit lanes. The low unpack is lanes 00/11 and the high is
  // 22/33. Note that this doesn't split the vector into two lanes, as the
  // AVX2 counterparts do.
  __m128i ab_01 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  __m128i ab_23 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  __m128i cd_01 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  __m128i cd_23 = _mm_unpackhi_epi32(vecs[2], vecs[3]);

  // Interleave 64-bit lanes.
  __m128i abcd_0 = _mm_unpacklo_epi64(ab_01, cd_01);
  __m128i abcd_1 = _mm_unpackhi_epi64(ab_01, cd_01);
  __m128i abcd_2 = _mm_unpacklo_epi64(ab_23, cd_23);
  __m128i abcd_3 = _mm_unpackhi_epi64(ab_23, cd_23);

  vecs[0] = abcd_0;
  vecs[1] = abcd_1;
  vecs[2] = abcd_2;
  vecs[3] = abcd_3;
}

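// Load one 64-byte message block from each of the four inputs and transpose
// the words so that out[i] holds message word i of all four blocks.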
INLINE void transpose_msg_vecs4(const uint8_t *const *inputs,
                                size_t block_offset, __m128i out[16]) {
  out[0] = loadu_128(&inputs[0][block_offset + 0 * sizeof(__m128i)]);
  out[1] = loadu_128(&inputs[1][block_offset + 0 * sizeof(__m128i)]);
  out[2] = loadu_128(&inputs[2][block_offset + 0 * sizeof(__m128i)]);
  out[3] = loadu_128(&inputs[3][block_offset + 0 * sizeof(__m128i)]);
  out[4] = loadu_128(&inputs[0][block_offset + 1 * sizeof(__m128i)]);
  out[5] = loadu_128(&inputs[1][block_offset + 1 * sizeof(__m128i)]);
  out[6] = loadu_128(&inputs[2][block_offset + 1 * sizeof(__m128i)]);
  out[7] = loadu_128(&inputs[3][block_offset + 1 * sizeof(__m128i)]);
  out[8] = loadu_128(&inputs[0][block_offset + 2 * sizeof(__m128i)]);
  out[9] = loadu_128(&inputs[1][block_offset + 2 * sizeof(__m128i)]);
  out[10] = loadu_128(&inputs[2][block_offset + 2 * sizeof(__m128i)]);
  out[11] = loadu_128(&inputs[3][block_offset + 2 * sizeof(__m128i)]);
  out[12] = loadu_128(&inputs[0][block_offset + 3 * sizeof(__m128i)]);
  out[13] = loadu_128(&inputs[1][block_offset + 3 * sizeof(__m128i)]);
  out[14] = loadu_128(&inputs[2][block_offset + 3 * sizeof(__m128i)]);
  out[15] = loadu_128(&inputs[3][block_offset + 3 * sizeof(__m128i)]);
  for (size_t i = 0; i < 4; ++i) {
    _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
  }
  transpose_vecs_128(&out[0]);
  transpose_vecs_128(&out[4]);
  transpose_vecs_128(&out[8]);
  transpose_vecs_128(&out[12]);
}

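// Build the per-lane low and high counter words. When increment_counter is
// false the deltas are masked to zero, so every lane hashes with the same
// counter value.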
INLINE void load_counters4(uint64_t counter, bool increment_counter,
                           __m128i *out_lo, __m128i *out_hi) {
  uint64_t mask = (increment_counter ? ~0 : 0);
  __m256i mask_vec = _mm256_set1_epi64x(mask);
  __m256i deltas = _mm256_setr_epi64x(0, 1, 2, 3);
  deltas = _mm256_and_si256(mask_vec, deltas);
  __m256i counters =
      _mm256_add_epi64(_mm256_set1_epi64x((int64_t)counter), deltas);
  *out_lo = _mm256_cvtepi64_epi32(counters);
  *out_hi = _mm256_cvtepi64_epi32(_mm256_srli_epi64(counters, 32));
}

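// Hash four inputs of `blocks` blocks each in parallel. h_vecs holds the eight
// chaining-value words, one vector per word, with one lane per input.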
static
void blake3_hash4_avx512(const uint8_t *const *inputs, size_t blocks,
                         const uint32_t key[8], uint64_t counter,
                         bool increment_counter, uint8_t flags,
                         uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
  __m128i h_vecs[8] = {
      set1_128(key[0]), set1_128(key[1]), set1_128(key[2]), set1_128(key[3]),
      set1_128(key[4]), set1_128(key[5]), set1_128(key[6]), set1_128(key[7]),
  };
  __m128i counter_low_vec, counter_high_vec;
  load_counters4(counter, increment_counter, &counter_low_vec,
                 &counter_high_vec);
  uint8_t block_flags = flags | flags_start;

  for (size_t block = 0; block < blocks; block++) {
    if (block + 1 == blocks) {
      block_flags |= flags_end;
    }
    __m128i block_len_vec = set1_128(BLAKE3_BLOCK_LEN);
    __m128i block_flags_vec = set1_128(block_flags);
    __m128i msg_vecs[16];
    transpose_msg_vecs4(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);

    __m128i v[16] = {
        h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
        h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
        set1_128(IV[0]), set1_128(IV[1]), set1_128(IV[2]), set1_128(IV[3]),
        counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
    };
    round_fn4(v, msg_vecs, 0);
    round_fn4(v, msg_vecs, 1);
    round_fn4(v, msg_vecs, 2);
    round_fn4(v, msg_vecs, 3);
    round_fn4(v, msg_vecs, 4);
    round_fn4(v, msg_vecs, 5);
    round_fn4(v, msg_vecs, 6);
    h_vecs[0] = xor_128(v[0], v[8]);
    h_vecs[1] = xor_128(v[1], v[9]);
    h_vecs[2] = xor_128(v[2], v[10]);
    h_vecs[3] = xor_128(v[3], v[11]);
    h_vecs[4] = xor_128(v[4], v[12]);
    h_vecs[5] = xor_128(v[5], v[13]);
    h_vecs[6] = xor_128(v[6], v[14]);
    h_vecs[7] = xor_128(v[7], v[15]);

    block_flags = flags;
  }

  transpose_vecs_128(&h_vecs[0]);
  transpose_vecs_128(&h_vecs[4]);
  // The first four vecs now contain the first half of each output, and the
  // second four vecs contain the second half of each output.
  storeu_128(h_vecs[0], &out[0 * sizeof(__m128i)]);
  storeu_128(h_vecs[4], &out[1 * sizeof(__m128i)]);
  storeu_128(h_vecs[1], &out[2 * sizeof(__m128i)]);
  storeu_128(h_vecs[5], &out[3 * sizeof(__m128i)]);
  storeu_128(h_vecs[2], &out[4 * sizeof(__m128i)]);
  storeu_128(h_vecs[6], &out[5 * sizeof(__m128i)]);
  storeu_128(h_vecs[3], &out[6 * sizeof(__m128i)]);
  storeu_128(h_vecs[7], &out[7 * sizeof(__m128i)]);
}

/*
 * ----------------------------------------------------------------------------
 * hash8_avx512
 * ----------------------------------------------------------------------------
 */

INLINE void round_fn8(__m256i v[16], __m256i m[16], size_t r) {
  v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
  v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
  v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
  v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
  v[0] = add_256(v[0], v[4]);
  v[1] = add_256(v[1], v[5]);
  v[2] = add_256(v[2], v[6]);
  v[3] = add_256(v[3], v[7]);
  v[12] = xor_256(v[12], v[0]);
  v[13] = xor_256(v[13], v[1]);
  v[14] = xor_256(v[14], v[2]);
  v[15] = xor_256(v[15], v[3]);
  v[12] = rot16_256(v[12]);
  v[13] = rot16_256(v[13]);
  v[14] = rot16_256(v[14]);
  v[15] = rot16_256(v[15]);
  v[8] = add_256(v[8], v[12]);
  v[9] = add_256(v[9], v[13]);
  v[10] = add_256(v[10], v[14]);
  v[11] = add_256(v[11], v[15]);
  v[4] = xor_256(v[4], v[8]);
  v[5] = xor_256(v[5], v[9]);
  v[6] = xor_256(v[6], v[10]);
  v[7] = xor_256(v[7], v[11]);
  v[4] = rot12_256(v[4]);
  v[5] = rot12_256(v[5]);
  v[6] = rot12_256(v[6]);
  v[7] = rot12_256(v[7]);
  v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
  v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
  v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
  v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
  v[0] = add_256(v[0], v[4]);
  v[1] = add_256(v[1], v[5]);
  v[2] = add_256(v[2], v[6]);
  v[3] = add_256(v[3], v[7]);
  v[12] = xor_256(v[12], v[0]);
  v[13] = xor_256(v[13], v[1]);
  v[14] = xor_256(v[14], v[2]);
  v[15] = xor_256(v[15], v[3]);
  v[12] = rot8_256(v[12]);
  v[13] = rot8_256(v[13]);
  v[14] = rot8_256(v[14]);
  v[15] = rot8_256(v[15]);
  v[8] = add_256(v[8], v[12]);
  v[9] = add_256(v[9], v[13]);
  v[10] = add_256(v[10], v[14]);
  v[11] = add_256(v[11], v[15]);
  v[4] = xor_256(v[4], v[8]);
  v[5] = xor_256(v[5], v[9]);
  v[6] = xor_256(v[6], v[10]);
  v[7] = xor_256(v[7], v[11]);
  v[4] = rot7_256(v[4]);
  v[5] = rot7_256(v[5]);
  v[6] = rot7_256(v[6]);
  v[7] = rot7_256(v[7]);

  v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
  v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
  v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
  v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
  v[0] = add_256(v[0], v[5]);
  v[1] = add_256(v[1], v[6]);
  v[2] = add_256(v[2], v[7]);
  v[3] = add_256(v[3], v[4]);
  v[15] = xor_256(v[15], v[0]);
  v[12] = xor_256(v[12], v[1]);
  v[13] = xor_256(v[13], v[2]);
  v[14] = xor_256(v[14], v[3]);
  v[15] = rot16_256(v[15]);
  v[12] = rot16_256(v[12]);
  v[13] = rot16_256(v[13]);
  v[14] = rot16_256(v[14]);
  v[10] = add_256(v[10], v[15]);
  v[11] = add_256(v[11], v[12]);
  v[8] = add_256(v[8], v[13]);
  v[9] = add_256(v[9], v[14]);
  v[5] = xor_256(v[5], v[10]);
  v[6] = xor_256(v[6], v[11]);
  v[7] = xor_256(v[7], v[8]);
  v[4] = xor_256(v[4], v[9]);
  v[5] = rot12_256(v[5]);
  v[6] = rot12_256(v[6]);
  v[7] = rot12_256(v[7]);
  v[4] = rot12_256(v[4]);
  v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
  v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
  v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
  v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
  v[0] = add_256(v[0], v[5]);
  v[1] = add_256(v[1], v[6]);
  v[2] = add_256(v[2], v[7]);
  v[3] = add_256(v[3], v[4]);
  v[15] = xor_256(v[15], v[0]);
  v[12] = xor_256(v[12], v[1]);
  v[13] = xor_256(v[13], v[2]);
  v[14] = xor_256(v[14], v[3]);
  v[15] = rot8_256(v[15]);
  v[12] = rot8_256(v[12]);
  v[13] = rot8_256(v[13]);
  v[14] = rot8_256(v[14]);
  v[10] = add_256(v[10], v[15]);
  v[11] = add_256(v[11], v[12]);
  v[8] = add_256(v[8], v[13]);
  v[9] = add_256(v[9], v[14]);
  v[5] = xor_256(v[5], v[10]);
  v[6] = xor_256(v[6], v[11]);
  v[7] = xor_256(v[7], v[8]);
  v[4] = xor_256(v[4], v[9]);
  v[5] = rot7_256(v[5]);
  v[6] = rot7_256(v[6]);
  v[7] = rot7_256(v[7]);
  v[4] = rot7_256(v[4]);
}

INLINE void transpose_vecs_256(__m256i vecs[8]) {
  // Interleave 32-bit lanes. The low unpack is lanes 00/11/44/55, and the high
  // is 22/33/66/77.
  __m256i ab_0145 = _mm256_unpacklo_epi32(vecs[0], vecs[1]);
  __m256i ab_2367 = _mm256_unpackhi_epi32(vecs[0], vecs[1]);
  __m256i cd_0145 = _mm256_unpacklo_epi32(vecs[2], vecs[3]);
  __m256i cd_2367 = _mm256_unpackhi_epi32(vecs[2], vecs[3]);
  __m256i ef_0145 = _mm256_unpacklo_epi32(vecs[4], vecs[5]);
  __m256i ef_2367 = _mm256_unpackhi_epi32(vecs[4], vecs[5]);
  __m256i gh_0145 = _mm256_unpacklo_epi32(vecs[6], vecs[7]);
  __m256i gh_2367 = _mm256_unpackhi_epi32(vecs[6], vecs[7]);

  // Interleave 64-bit lanes. The low unpack is lanes 00/22 and the high is
  // 11/33.
  __m256i abcd_04 = _mm256_unpacklo_epi64(ab_0145, cd_0145);
  __m256i abcd_15 = _mm256_unpackhi_epi64(ab_0145, cd_0145);
  __m256i abcd_26 = _mm256_unpacklo_epi64(ab_2367, cd_2367);
  __m256i abcd_37 = _mm256_unpackhi_epi64(ab_2367, cd_2367);
  __m256i efgh_04 = _mm256_unpacklo_epi64(ef_0145, gh_0145);
  __m256i efgh_15 = _mm256_unpackhi_epi64(ef_0145, gh_0145);
  __m256i efgh_26 = _mm256_unpacklo_epi64(ef_2367, gh_2367);
  __m256i efgh_37 = _mm256_unpackhi_epi64(ef_2367, gh_2367);

  // Interleave 128-bit lanes.
  vecs[0] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x20);
  vecs[1] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x20);
  vecs[2] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x20);
  vecs[3] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x20);
  vecs[4] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x31);
  vecs[5] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x31);
  vecs[6] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x31);
  vecs[7] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x31);
}

INLINE void transpose_msg_vecs8(const uint8_t *const *inputs,
                                size_t block_offset, __m256i out[16]) {
  out[0] = loadu_256(&inputs[0][block_offset + 0 * sizeof(__m256i)]);
  out[1] = loadu_256(&inputs[1][block_offset + 0 * sizeof(__m256i)]);
  out[2] = loadu_256(&inputs[2][block_offset + 0 * sizeof(__m256i)]);
  out[3] = loadu_256(&inputs[3][block_offset + 0 * sizeof(__m256i)]);
  out[4] = loadu_256(&inputs[4][block_offset + 0 * sizeof(__m256i)]);
  out[5] = loadu_256(&inputs[5][block_offset + 0 * sizeof(__m256i)]);
  out[6] = loadu_256(&inputs[6][block_offset + 0 * sizeof(__m256i)]);
  out[7] = loadu_256(&inputs[7][block_offset + 0 * sizeof(__m256i)]);
  out[8] = loadu_256(&inputs[0][block_offset + 1 * sizeof(__m256i)]);
  out[9] = loadu_256(&inputs[1][block_offset + 1 * sizeof(__m256i)]);
  out[10] = loadu_256(&inputs[2][block_offset + 1 * sizeof(__m256i)]);
  out[11] = loadu_256(&inputs[3][block_offset + 1 * sizeof(__m256i)]);
  out[12] = loadu_256(&inputs[4][block_offset + 1 * sizeof(__m256i)]);
  out[13] = loadu_256(&inputs[5][block_offset + 1 * sizeof(__m256i)]);
  out[14] = loadu_256(&inputs[6][block_offset + 1 * sizeof(__m256i)]);
  out[15] = loadu_256(&inputs[7][block_offset + 1 * sizeof(__m256i)]);
  for (size_t i = 0; i < 8; ++i) {
    _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
  }
  transpose_vecs_256(&out[0]);
  transpose_vecs_256(&out[8]);
}

INLINE void load_counters8(uint64_t counter, bool increment_counter,
                           __m256i *out_lo, __m256i *out_hi) {
  uint64_t mask = (increment_counter ? ~0 : 0);
  __m512i mask_vec = _mm512_set1_epi64(mask);
  __m512i deltas = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
  deltas = _mm512_and_si512(mask_vec, deltas);
  __m512i counters =
      _mm512_add_epi64(_mm512_set1_epi64((int64_t)counter), deltas);
  *out_lo = _mm512_cvtepi64_epi32(counters);
  *out_hi = _mm512_cvtepi64_epi32(_mm512_srli_epi64(counters, 32));
}

static
void blake3_hash8_avx512(const uint8_t *const *inputs, size_t blocks,
                         const uint32_t key[8], uint64_t counter,
                         bool increment_counter, uint8_t flags,
                         uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
  __m256i h_vecs[8] = {
      set1_256(key[0]), set1_256(key[1]), set1_256(key[2]), set1_256(key[3]),
      set1_256(key[4]), set1_256(key[5]), set1_256(key[6]), set1_256(key[7]),
  };
  __m256i counter_low_vec, counter_high_vec;
  load_counters8(counter, increment_counter, &counter_low_vec,
                 &counter_high_vec);
  uint8_t block_flags = flags | flags_start;

  for (size_t block = 0; block < blocks; block++) {
    if (block + 1 == blocks) {
      block_flags |= flags_end;
    }
    __m256i block_len_vec = set1_256(BLAKE3_BLOCK_LEN);
    __m256i block_flags_vec = set1_256(block_flags);
    __m256i msg_vecs[16];
    transpose_msg_vecs8(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);

    __m256i v[16] = {
        h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
        h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
        set1_256(IV[0]), set1_256(IV[1]), set1_256(IV[2]), set1_256(IV[3]),
        counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
    };
    round_fn8(v, msg_vecs, 0);
    round_fn8(v, msg_vecs, 1);
    round_fn8(v, msg_vecs, 2);
    round_fn8(v, msg_vecs, 3);
    round_fn8(v, msg_vecs, 4);
    round_fn8(v, msg_vecs, 5);
    round_fn8(v, msg_vecs, 6);
    h_vecs[0] = xor_256(v[0], v[8]);
    h_vecs[1] = xor_256(v[1], v[9]);
    h_vecs[2] = xor_256(v[2], v[10]);
    h_vecs[3] = xor_256(v[3], v[11]);
    h_vecs[4] = xor_256(v[4], v[12]);
    h_vecs[5] = xor_256(v[5], v[13]);
    h_vecs[6] = xor_256(v[6], v[14]);
    h_vecs[7] = xor_256(v[7], v[15]);

    block_flags = flags;
  }

  transpose_vecs_256(h_vecs);
  storeu_256(h_vecs[0], &out[0 * sizeof(__m256i)]);
  storeu_256(h_vecs[1], &out[1 * sizeof(__m256i)]);
  storeu_256(h_vecs[2], &out[2 * sizeof(__m256i)]);
  storeu_256(h_vecs[3], &out[3 * sizeof(__m256i)]);
  storeu_256(h_vecs[4], &out[4 * sizeof(__m256i)]);
  storeu_256(h_vecs[5], &out[5 * sizeof(__m256i)]);
  storeu_256(h_vecs[6], &out[6 * sizeof(__m256i)]);
  storeu_256(h_vecs[7], &out[7 * sizeof(__m256i)]);
}

/*
 * ----------------------------------------------------------------------------
 * hash16_avx512
 * ----------------------------------------------------------------------------
 */

INLINE void round_fn16(__m512i v[16], __m512i m[16], size_t r) {
  v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
  v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
  v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
  v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
  v[0] = add_512(v[0], v[4]);
  v[1] = add_512(v[1], v[5]);
  v[2] = add_512(v[2], v[6]);
  v[3] = add_512(v[3], v[7]);
  v[12] = xor_512(v[12], v[0]);
  v[13] = xor_512(v[13], v[1]);
  v[14] = xor_512(v[14], v[2]);
  v[15] = xor_512(v[15], v[3]);
  v[12] = rot16_512(v[12]);
  v[13] = rot16_512(v[13]);
  v[14] = rot16_512(v[14]);
  v[15] = rot16_512(v[15]);
  v[8] = add_512(v[8], v[12]);
  v[9] = add_512(v[9], v[13]);
  v[10] = add_512(v[10], v[14]);
  v[11] = add_512(v[11], v[15]);
  v[4] = xor_512(v[4], v[8]);
  v[5] = xor_512(v[5], v[9]);
  v[6] = xor_512(v[6], v[10]);
  v[7] = xor_512(v[7], v[11]);
  v[4] = rot12_512(v[4]);
  v[5] = rot12_512(v[5]);
  v[6] = rot12_512(v[6]);
  v[7] = rot12_512(v[7]);
  v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
  v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
  v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
  v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
  v[0] = add_512(v[0], v[4]);
  v[1] = add_512(v[1], v[5]);
  v[2] = add_512(v[2], v[6]);
  v[3] = add_512(v[3], v[7]);
  v[12] = xor_512(v[12], v[0]);
  v[13] = xor_512(v[13], v[1]);
  v[14] = xor_512(v[14], v[2]);
  v[15] = xor_512(v[15], v[3]);
  v[12] = rot8_512(v[12]);
  v[13] = rot8_512(v[13]);
  v[14] = rot8_512(v[14]);
  v[15] = rot8_512(v[15]);
  v[8] = add_512(v[8], v[12]);
  v[9] = add_512(v[9], v[13]);
  v[10] = add_512(v[10], v[14]);
  v[11] = add_512(v[11], v[15]);
  v[4] = xor_512(v[4], v[8]);
  v[5] = xor_512(v[5], v[9]);
  v[6] = xor_512(v[6], v[10]);
  v[7] = xor_512(v[7], v[11]);
  v[4] = rot7_512(v[4]);
  v[5] = rot7_512(v[5]);
  v[6] = rot7_512(v[6]);
  v[7] = rot7_512(v[7]);

  v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
  v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
  v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
  v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
  v[0] = add_512(v[0], v[5]);
  v[1] = add_512(v[1], v[6]);
  v[2] = add_512(v[2], v[7]);
  v[3] = add_512(v[3], v[4]);
  v[15] = xor_512(v[15], v[0]);
  v[12] = xor_512(v[12], v[1]);
  v[13] = xor_512(v[13], v[2]);
  v[14] = xor_512(v[14], v[3]);
  v[15] = rot16_512(v[15]);
  v[12] = rot16_512(v[12]);
  v[13] = rot16_512(v[13]);
  v[14] = rot16_512(v[14]);
  v[10] = add_512(v[10], v[15]);
  v[11] = add_512(v[11], v[12]);
  v[8] = add_512(v[8], v[13]);
  v[9] = add_512(v[9], v[14]);
  v[5] = xor_512(v[5], v[10]);
  v[6] = xor_512(v[6], v[11]);
  v[7] = xor_512(v[7], v[8]);
  v[4] = xor_512(v[4], v[9]);
  v[5] = rot12_512(v[5]);
  v[6] = rot12_512(v[6]);
  v[7] = rot12_512(v[7]);
  v[4] = rot12_512(v[4]);
  v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
  v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
  v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
  v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
  v[0] = add_512(v[0], v[5]);
  v[1] = add_512(v[1], v[6]);
  v[2] = add_512(v[2], v[7]);
  v[3] = add_512(v[3], v[4]);
  v[15] = xor_512(v[15], v[0]);
  v[12] = xor_512(v[12], v[1]);
  v[13] = xor_512(v[13], v[2]);
  v[14] = xor_512(v[14], v[3]);
  v[15] = rot8_512(v[15]);
  v[12] = rot8_512(v[12]);
  v[13] = rot8_512(v[13]);
  v[14] = rot8_512(v[14]);
  v[10] = add_512(v[10], v[15]);
  v[11] = add_512(v[11], v[12]);
  v[8] = add_512(v[8], v[13]);
  v[9] = add_512(v[9], v[14]);
  v[5] = xor_512(v[5], v[10]);
  v[6] = xor_512(v[6], v[11]);
  v[7] = xor_512(v[7], v[8]);
  v[4] = xor_512(v[4], v[9]);
  v[5] = rot7_512(v[5]);
  v[6] = rot7_512(v[6]);
  v[7] = rot7_512(v[7]);
  v[4] = rot7_512(v[4]);
}

// 0b10001000, or lanes a0/a2/b0/b2 in little-endian order
#define LO_IMM8 0x88

INLINE __m512i unpack_lo_128(__m512i a, __m512i b) {
  return _mm512_shuffle_i32x4(a, b, LO_IMM8);
}

// 0b11011101, or lanes a1/a3/b1/b3 in little-endian order
#define HI_IMM8 0xdd

INLINE __m512i unpack_hi_128(__m512i a, __m512i b) {
  return _mm512_shuffle_i32x4(a, b, HI_IMM8);
}

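// Transpose a 16x16 matrix of 32-bit words: after the call, word j of vecs[i]
// has moved to word i of vecs[j].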
INLINE void transpose_vecs_512(__m512i vecs[16]) {
  // Interleave 32-bit lanes. The _0 unpack is lanes
  // 0/0/1/1/4/4/5/5/8/8/9/9/12/12/13/13, and the _2 unpack is lanes
  // 2/2/3/3/6/6/7/7/10/10/11/11/14/14/15/15.
  __m512i ab_0 = _mm512_unpacklo_epi32(vecs[0], vecs[1]);
  __m512i ab_2 = _mm512_unpackhi_epi32(vecs[0], vecs[1]);
  __m512i cd_0 = _mm512_unpacklo_epi32(vecs[2], vecs[3]);
  __m512i cd_2 = _mm512_unpackhi_epi32(vecs[2], vecs[3]);
  __m512i ef_0 = _mm512_unpacklo_epi32(vecs[4], vecs[5]);
  __m512i ef_2 = _mm512_unpackhi_epi32(vecs[4], vecs[5]);
  __m512i gh_0 = _mm512_unpacklo_epi32(vecs[6], vecs[7]);
  __m512i gh_2 = _mm512_unpackhi_epi32(vecs[6], vecs[7]);
  __m512i ij_0 = _mm512_unpacklo_epi32(vecs[8], vecs[9]);
  __m512i ij_2 = _mm512_unpackhi_epi32(vecs[8], vecs[9]);
  __m512i kl_0 = _mm512_unpacklo_epi32(vecs[10], vecs[11]);
  __m512i kl_2 = _mm512_unpackhi_epi32(vecs[10], vecs[11]);
  __m512i mn_0 = _mm512_unpacklo_epi32(vecs[12], vecs[13]);
  __m512i mn_2 = _mm512_unpackhi_epi32(vecs[12], vecs[13]);
  __m512i op_0 = _mm512_unpacklo_epi32(vecs[14], vecs[15]);
  __m512i op_2 = _mm512_unpackhi_epi32(vecs[14], vecs[15]);

  // Interleave 64-bit lanes. The _0 unpack is lanes
  // 0/0/0/0/4/4/4/4/8/8/8/8/12/12/12/12, the _1 unpack is lanes
  // 1/1/1/1/5/5/5/5/9/9/9/9/13/13/13/13, the _2 unpack is lanes
  // 2/2/2/2/6/6/6/6/10/10/10/10/14/14/14/14, and the _3 unpack is lanes
  // 3/3/3/3/7/7/7/7/11/11/11/11/15/15/15/15.
  __m512i abcd_0 = _mm512_unpacklo_epi64(ab_0, cd_0);
  __m512i abcd_1 = _mm512_unpackhi_epi64(ab_0, cd_0);
  __m512i abcd_2 = _mm512_unpacklo_epi64(ab_2, cd_2);
  __m512i abcd_3 = _mm512_unpackhi_epi64(ab_2, cd_2);
  __m512i efgh_0 = _mm512_unpacklo_epi64(ef_0, gh_0);
  __m512i efgh_1 = _mm512_unpackhi_epi64(ef_0, gh_0);
  __m512i efgh_2 = _mm512_unpacklo_epi64(ef_2, gh_2);
  __m512i efgh_3 = _mm512_unpackhi_epi64(ef_2, gh_2);
  __m512i ijkl_0 = _mm512_unpacklo_epi64(ij_0, kl_0);
  __m512i ijkl_1 = _mm512_unpackhi_epi64(ij_0, kl_0);
  __m512i ijkl_2 = _mm512_unpacklo_epi64(ij_2, kl_2);
  __m512i ijkl_3 = _mm512_unpackhi_epi64(ij_2, kl_2);
  __m512i mnop_0 = _mm512_unpacklo_epi64(mn_0, op_0);
  __m512i mnop_1 = _mm512_unpackhi_epi64(mn_0, op_0);
  __m512i mnop_2 = _mm512_unpacklo_epi64(mn_2, op_2);
  __m512i mnop_3 = _mm512_unpackhi_epi64(mn_2, op_2);

  // Interleave 128-bit lanes. The _0 unpack is
  // 0/0/0/0/8/8/8/8/0/0/0/0/8/8/8/8, the _1 unpack is
  // 1/1/1/1/9/9/9/9/1/1/1/1/9/9/9/9, and so on.
  __m512i abcdefgh_0 = unpack_lo_128(abcd_0, efgh_0);
  __m512i abcdefgh_1 = unpack_lo_128(abcd_1, efgh_1);
  __m512i abcdefgh_2 = unpack_lo_128(abcd_2, efgh_2);
  __m512i abcdefgh_3 = unpack_lo_128(abcd_3, efgh_3);
  __m512i abcdefgh_4 = unpack_hi_128(abcd_0, efgh_0);
  __m512i abcdefgh_5 = unpack_hi_128(abcd_1, efgh_1);
  __m512i abcdefgh_6 = unpack_hi_128(abcd_2, efgh_2);
  __m512i abcdefgh_7 = unpack_hi_128(abcd_3, efgh_3);
  __m512i ijklmnop_0 = unpack_lo_128(ijkl_0, mnop_0);
  __m512i ijklmnop_1 = unpack_lo_128(ijkl_1, mnop_1);
  __m512i ijklmnop_2 = unpack_lo_128(ijkl_2, mnop_2);
  __m512i ijklmnop_3 = unpack_lo_128(ijkl_3, mnop_3);
  __m512i ijklmnop_4 = unpack_hi_128(ijkl_0, mnop_0);
  __m512i ijklmnop_5 = unpack_hi_128(ijkl_1, mnop_1);
  __m512i ijklmnop_6 = unpack_hi_128(ijkl_2, mnop_2);
  __m512i ijklmnop_7 = unpack_hi_128(ijkl_3, mnop_3);

  // Interleave 128-bit lanes again for the final outputs.
  vecs[0] = unpack_lo_128(abcdefgh_0, ijklmnop_0);
  vecs[1] = unpack_lo_128(abcdefgh_1, ijklmnop_1);
  vecs[2] = unpack_lo_128(abcdefgh_2, ijklmnop_2);
  vecs[3] = unpack_lo_128(abcdefgh_3, ijklmnop_3);
  vecs[4] = unpack_lo_128(abcdefgh_4, ijklmnop_4);
  vecs[5] = unpack_lo_128(abcdefgh_5, ijklmnop_5);
  vecs[6] = unpack_lo_128(abcdefgh_6, ijklmnop_6);
  vecs[7] = unpack_lo_128(abcdefgh_7, ijklmnop_7);
  vecs[8] = unpack_hi_128(abcdefgh_0, ijklmnop_0);
  vecs[9] = unpack_hi_128(abcdefgh_1, ijklmnop_1);
  vecs[10] = unpack_hi_128(abcdefgh_2, ijklmnop_2);
  vecs[11] = unpack_hi_128(abcdefgh_3, ijklmnop_3);
  vecs[12] = unpack_hi_128(abcdefgh_4, ijklmnop_4);
  vecs[13] = unpack_hi_128(abcdefgh_5, ijklmnop_5);
  vecs[14] = unpack_hi_128(abcdefgh_6, ijklmnop_6);
  vecs[15] = unpack_hi_128(abcdefgh_7, ijklmnop_7);
}

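// With sixteen inputs, each input's 64-byte block fills exactly one __m512i,
// so a single load per input followed by one 16x16 transpose produces the
// sixteen message-word vectors.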
INLINE void transpose_msg_vecs16(const uint8_t *const *inputs,
                                 size_t block_offset, __m512i out[16]) {
  out[0] = loadu_512(&inputs[0][block_offset]);
  out[1] = loadu_512(&inputs[1][block_offset]);
  out[2] = loadu_512(&inputs[2][block_offset]);
  out[3] = loadu_512(&inputs[3][block_offset]);
  out[4] = loadu_512(&inputs[4][block_offset]);
  out[5] = loadu_512(&inputs[5][block_offset]);
  out[6] = loadu_512(&inputs[6][block_offset]);
  out[7] = loadu_512(&inputs[7][block_offset]);
  out[8] = loadu_512(&inputs[8][block_offset]);
  out[9] = loadu_512(&inputs[9][block_offset]);
  out[10] = loadu_512(&inputs[10][block_offset]);
  out[11] = loadu_512(&inputs[11][block_offset]);
  out[12] = loadu_512(&inputs[12][block_offset]);
  out[13] = loadu_512(&inputs[13][block_offset]);
  out[14] = loadu_512(&inputs[14][block_offset]);
  out[15] = loadu_512(&inputs[15][block_offset]);
  for (size_t i = 0; i < 16; ++i) {
    _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
  }
  transpose_vecs_512(out);
}

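// Unlike the 4x and 8x versions, the counters are computed directly in 32-bit
// lanes: the low words are added in epi32, and a compare mask detects which
// lanes wrapped so the corresponding high words can be incremented.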
INLINE void load_counters16(uint64_t counter, bool increment_counter,
                            __m512i *out_lo, __m512i *out_hi) {
  const __m512i mask = _mm512_set1_epi32(-(int32_t)increment_counter);
  const __m512i add0 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  const __m512i add1 = _mm512_and_si512(mask, add0);
  __m512i l = _mm512_add_epi32(_mm512_set1_epi32((int32_t)counter), add1);
  __mmask16 carry = _mm512_cmp_epu32_mask(l, add1, _MM_CMPINT_LT);
  __m512i h = _mm512_mask_add_epi32(_mm512_set1_epi32((int32_t)(counter >> 32)), carry, _mm512_set1_epi32((int32_t)(counter >> 32)), _mm512_set1_epi32(1));
  *out_lo = l;
  *out_hi = h;
}

static
void blake3_hash16_avx512(const uint8_t *const *inputs, size_t blocks,
                          const uint32_t key[8], uint64_t counter,
                          bool increment_counter, uint8_t flags,
                          uint8_t flags_start, uint8_t flags_end,
                          uint8_t *out) {
  __m512i h_vecs[8] = {
      set1_512(key[0]), set1_512(key[1]), set1_512(key[2]), set1_512(key[3]),
      set1_512(key[4]), set1_512(key[5]), set1_512(key[6]), set1_512(key[7]),
  };
  __m512i counter_low_vec, counter_high_vec;
  load_counters16(counter, increment_counter, &counter_low_vec,
                  &counter_high_vec);
  uint8_t block_flags = flags | flags_start;

  for (size_t block = 0; block < blocks; block++) {
    if (block + 1 == blocks) {
      block_flags |= flags_end;
    }
    __m512i block_len_vec = set1_512(BLAKE3_BLOCK_LEN);
    __m512i block_flags_vec = set1_512(block_flags);
    __m512i msg_vecs[16];
    transpose_msg_vecs16(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);

    __m512i v[16] = {
        h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
        h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
        set1_512(IV[0]), set1_512(IV[1]), set1_512(IV[2]), set1_512(IV[3]),
        counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
    };
    round_fn16(v, msg_vecs, 0);
    round_fn16(v, msg_vecs, 1);
    round_fn16(v, msg_vecs, 2);
    round_fn16(v, msg_vecs, 3);
    round_fn16(v, msg_vecs, 4);
    round_fn16(v, msg_vecs, 5);
    round_fn16(v, msg_vecs, 6);
    h_vecs[0] = xor_512(v[0], v[8]);
    h_vecs[1] = xor_512(v[1], v[9]);
    h_vecs[2] = xor_512(v[2], v[10]);
    h_vecs[3] = xor_512(v[3], v[11]);
    h_vecs[4] = xor_512(v[4], v[12]);
    h_vecs[5] = xor_512(v[5], v[13]);
    h_vecs[6] = xor_512(v[6], v[14]);
    h_vecs[7] = xor_512(v[7], v[15]);

    block_flags = flags;
  }

  // transpose_vecs_512 operates on a 16x16 matrix of words, but we only have 8
  // state vectors. Pad the matrix with zeros. After transposition, store the
  // lower half of each vector.
  __m512i padded[16] = {
      h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
      h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
      set1_512(0), set1_512(0), set1_512(0), set1_512(0),
      set1_512(0), set1_512(0), set1_512(0), set1_512(0),
  };
  transpose_vecs_512(padded);
  _mm256_mask_storeu_epi32(&out[0 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[0]));
  _mm256_mask_storeu_epi32(&out[1 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[1]));
  _mm256_mask_storeu_epi32(&out[2 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[2]));
  _mm256_mask_storeu_epi32(&out[3 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[3]));
  _mm256_mask_storeu_epi32(&out[4 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[4]));
  _mm256_mask_storeu_epi32(&out[5 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[5]));
  _mm256_mask_storeu_epi32(&out[6 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[6]));
  _mm256_mask_storeu_epi32(&out[7 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[7]));
  _mm256_mask_storeu_epi32(&out[8 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[8]));
  _mm256_mask_storeu_epi32(&out[9 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[9]));
  _mm256_mask_storeu_epi32(&out[10 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[10]));
  _mm256_mask_storeu_epi32(&out[11 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[11]));
  _mm256_mask_storeu_epi32(&out[12 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[12]));
  _mm256_mask_storeu_epi32(&out[13 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[13]));
  _mm256_mask_storeu_epi32(&out[14 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[14]));
  _mm256_mask_storeu_epi32(&out[15 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[15]));
}

/*
 * ----------------------------------------------------------------------------
 * hash_many_avx512
 * ----------------------------------------------------------------------------
 */

INLINE void hash_one_avx512(const uint8_t *input, size_t blocks,
                            const uint32_t key[8], uint64_t counter,
                            uint8_t flags, uint8_t flags_start,
                            uint8_t flags_end, uint8_t out[BLAKE3_OUT_LEN]) {
  uint32_t cv[8];
  memcpy(cv, key, BLAKE3_KEY_LEN);
  uint8_t block_flags = flags | flags_start;
  while (blocks > 0) {
    if (blocks == 1) {
      block_flags |= flags_end;
    }
    blake3_compress_in_place_avx512(cv, input, BLAKE3_BLOCK_LEN, counter,
                                    block_flags);
    input = &input[BLAKE3_BLOCK_LEN];
    blocks -= 1;
    block_flags = flags;
  }
  memcpy(out, cv, BLAKE3_OUT_LEN);
}

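// Top-level dispatch: hash inputs in batches of 16, then 8, then 4, and handle
// any remainder one input at a time with the serial compression above.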
void blake3_hash_many_avx512(const uint8_t *const *inputs, size_t num_inputs,
                             size_t blocks, const uint32_t key[8],
                             uint64_t counter, bool increment_counter,
                             uint8_t flags, uint8_t flags_start,
                             uint8_t flags_end, uint8_t *out) {
  while (num_inputs >= 16) {
    blake3_hash16_avx512(inputs, blocks, key, counter, increment_counter, flags,
                         flags_start, flags_end, out);
    if (increment_counter) {
      counter += 16;
    }
    inputs += 16;
    num_inputs -= 16;
    out = &out[16 * BLAKE3_OUT_LEN];
  }
  while (num_inputs >= 8) {
    blake3_hash8_avx512(inputs, blocks, key, counter, increment_counter, flags,
                        flags_start, flags_end, out);
    if (increment_counter) {
      counter += 8;
    }
    inputs += 8;
    num_inputs -= 8;
    out = &out[8 * BLAKE3_OUT_LEN];
  }
  while (num_inputs >= 4) {
    blake3_hash4_avx512(inputs, blocks, key, counter, increment_counter, flags,
                        flags_start, flags_end, out);
    if (increment_counter) {
      counter += 4;
    }
    inputs += 4;
    num_inputs -= 4;
    out = &out[4 * BLAKE3_OUT_LEN];
  }
  while (num_inputs > 0) {
    hash_one_avx512(inputs[0], blocks, key, counter, flags, flags_start,
                    flags_end, out);
    if (increment_counter) {
      counter += 1;
    }
    inputs += 1;
    num_inputs -= 1;
    out = &out[BLAKE3_OUT_LEN];
  }
}