GitHub Repository: official-stockfish/Stockfish
Path: blob/master/src/nnue/layers/affine_transform_sparse_input.h
/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2026 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransformSparseInput of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>

#include "../../bitboard.h"
#include "../../memory.h"
#include "../simd.h"
#include "../nnue_common.h"

/*
   This file contains the definition for a fully connected layer (aka affine transform) with
   block sparse input.
*/
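
/*
   Background (added commentary, not part of the original sources): the inputs to this layer
   are the uint8 outputs of a clipped-ReLU layer, and in practice most of them are zero. The
   layer therefore groups the input into 4-byte blocks, finds the indices of the nonzero
   blocks, and only accumulates the weight columns belonging to those blocks. Conceptually it
   computes the same thing as the dense affine transform sketched below (a rough scalar
   reference assuming row-major weights; the real code stores the weights in a scrambled,
   block-friendly order):

       for (IndexType o = 0; o < OutputDimensions; ++o)
       {
           std::int32_t sum = biases[o];
           for (IndexType i = 0; i < InputDimensions; ++i)
               sum += weights[o * PaddedInputDimensions + i] * input[i];
           output[o] = sum;
       }
*/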

namespace Stockfish::Eval::NNUE::Layers {

#if (USE_SSSE3 | (USE_NEON >= 8))
static constexpr int lsb_index64[64] = {
  0,  47, 1,  56, 48, 27, 2,  60, 57, 49, 41, 37, 28, 16, 3,  61, 54, 58, 35, 52, 50, 42,
  21, 44, 38, 32, 29, 23, 17, 11, 4,  62, 46, 55, 26, 59, 40, 36, 15, 53, 34, 51, 20, 43,
  31, 22, 10, 45, 25, 39, 14, 33, 19, 30, 9,  24, 13, 18, 8,  12, 7,  6,  5,  63};

constexpr int constexpr_lsb(uint64_t bb) {
    assert(bb != 0);
    constexpr uint64_t debruijn64 = 0x03F79D71B4CB0A89ULL;
    return lsb_index64[((bb ^ (bb - 1)) * debruijn64) >> 58];
}
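
// Note (added commentary): constexpr_lsb() is a constexpr-friendly bit scan using the classic
// De Bruijn multiplication trick. `bb ^ (bb - 1)` turns everything up to and including the
// lowest set bit into ones; multiplying by the De Bruijn constant and shifting right by 58
// produces a distinct 6-bit value for each possible lowest-bit position, which lsb_index64
// maps back to the bit index. For example, bb = 0b1000 yields index 3.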

alignas(CacheLineSize) static constexpr struct OffsetIndices {

    std::uint16_t offset_indices[256][8];

    constexpr OffsetIndices() :
        offset_indices() {
        for (int i = 0; i < 256; ++i)
        {
            std::uint64_t j = i, k = 0;
            while (j)
            {
                offset_indices[i][k++] = constexpr_lsb(j);
                j &= j - 1;
            }
            while (k < 8)
                offset_indices[i][k++] = 0;
        }
    }

} Lookup;
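
// Note (added commentary): Lookup.offset_indices[m] holds, for every 8-bit mask m, the bit
// positions of the set bits of m in ascending order, padded with zeros up to 8 entries.
// For example, m == 0b00100110 gives {1, 2, 5, 0, 0, 0, 0, 0}. The generic find_nnz() path
// below uses this table to turn an 8-bit "which 32-bit blocks are nonzero" mask into output
// indices without a per-bit loop.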

#if defined(__GNUC__) || defined(__clang__)
#define RESTRICT __restrict__
#elif defined(_MSC_VER)
#define RESTRICT __restrict
#else
#define RESTRICT
#endif

// Find indices of nonzero 32-bit values in a packed byte buffer.
// The input pointer addresses a sequence of 32-bit blocks stored in a
// std::uint8_t array.
template<const IndexType InputDimensions>
void find_nnz(const std::uint8_t* RESTRICT input,
              std::uint16_t* RESTRICT      out,
              IndexType&                   count_out) {

#if defined(USE_AVX512ICL)

    constexpr IndexType SimdWidthIn  = 64;  // 512 bits
    constexpr IndexType SimdWidthOut = 32;  // 512 bits / 16 bits
    constexpr IndexType NumChunks    = InputDimensions / SimdWidthOut;
    const __m512i       increment    = _mm512_set1_epi16(SimdWidthOut);
    __m512i base = _mm512_set_epi16(  // Same permute order as _mm512_packus_epi32()
      31, 30, 29, 28, 15, 14, 13, 12, 27, 26, 25, 24, 11, 10, 9, 8, 23, 22, 21, 20, 7, 6, 5, 4, 19,
      18, 17, 16, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV0 = _mm512_load_si512(input + i * 2 * SimdWidthIn);
        const __m512i inputV1 = _mm512_load_si512(input + i * 2 * SimdWidthIn + SimdWidthIn);

        // Get a bitmask and gather non zero indices
        const __m512i   inputV01 = _mm512_packus_epi32(inputV0, inputV1);
        const __mmask32 nnzMask  = _mm512_test_epi16_mask(inputV01, inputV01);

        // Avoid _mm512_mask_compressstoreu_epi16() as it's 256 uOps on Zen4
        __m512i nnz = _mm512_maskz_compress_epi16(nnzMask, base);
        _mm512_storeu_si512(out + count, nnz);

        count += popcount(nnzMask);
        base = _mm512_add_epi16(base, increment);
    }
    count_out = count;

#elif defined(USE_AVX512)

    constexpr IndexType SimdWidth = 16;  // 512 bits / 32 bits
    constexpr IndexType NumChunks = InputDimensions / SimdWidth;
    const __m512i       increment = _mm512_set1_epi32(SimdWidth);
    __m512i base = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV = _mm512_load_si512(input + i * SimdWidth * sizeof(std::uint32_t));

        // Get a bitmask and gather non zero indices
        const __mmask16 nnzMask = _mm512_test_epi32_mask(inputV, inputV);
        const __m512i   nnzV    = _mm512_maskz_compress_epi32(nnzMask, base);
        _mm512_mask_cvtepi32_storeu_epi16(out + count, 0xFFFF, nnzV);
        count += popcount(nnzMask);
        base = _mm512_add_epi32(base, increment);
    }
    count_out = count;

#else

    using namespace SIMD;

    constexpr IndexType InputSimdWidth = sizeof(vec_uint_t) / sizeof(std::int32_t);
    // Outputs are processed 8 elements at a time, even if the SIMD width is narrower
    constexpr IndexType ChunkSize      = 8;
    constexpr IndexType NumChunks      = InputDimensions / ChunkSize;
    constexpr IndexType InputsPerChunk = ChunkSize / InputSimdWidth;

    static_assert(InputsPerChunk > 0, "SIMD width too wide");

    const auto inputVector = reinterpret_cast<const vec_uint_t*>(input);
    IndexType  count       = 0;
    vec128_t   base        = vec128_zero;
    const vec128_t increment = vec128_set_16(8);
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        // bitmask of nonzero values in this chunk
        unsigned nnz = 0;
        for (IndexType j = 0; j < InputsPerChunk; ++j)
        {
            const vec_uint_t inputChunk = inputVector[i * InputsPerChunk + j];
            nnz |= unsigned(vec_nnz(inputChunk)) << (j * InputSimdWidth);
        }
        const vec128_t offsets =
          vec128_load(reinterpret_cast<const vec128_t*>(&Lookup.offset_indices[nnz]));
        vec128_storeu(reinterpret_cast<vec128_t*>(out + count), vec128_add(base, offsets));
        count += popcount(nnz);
        base = vec128_add(base, increment);
    }
    count_out = count;
#endif
}
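
// For reference (added commentary, not compiled into the engine): all three find_nnz()
// variants above produce the same set of indices and the same count as the scalar sketch
// below, which scans the input as 32-bit blocks and records the indices of the nonzero ones.
// The vector versions may emit the indices within a chunk in a different order, which the
// caller does not rely on.
//
//     template<IndexType InputDimensions>
//     void find_nnz_scalar(const std::uint8_t* input, std::uint16_t* out, IndexType& count_out) {
//         IndexType count = 0;
//         for (IndexType i = 0; i < InputDimensions; ++i)
//             if (load_as<std::uint32_t>(input + i * 4) != 0)
//                 out[count++] = std::uint16_t(i);
//         count_out = count;
//     }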

#endif

// Sparse input implementation
template<IndexType InDims, IndexType OutDims>
class AffineTransformSparseInput {
   public:
    // Input/output type
    using InputType  = std::uint8_t;
    using OutputType = std::int32_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions  = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static_assert(OutputDimensions % 16 == 0,
                  "Only implemented for OutputDimensions divisible by 16.");

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

#if (USE_SSSE3 | (USE_NEON >= 8))
    static constexpr IndexType ChunkSize = 4;
#else
    static constexpr IndexType ChunkSize = 1;
#endif

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
        std::uint32_t hashValue = 0xCC03DAE4u;
        hashValue += OutputDimensions;
        hashValue ^= prevHash >> 1;
        hashValue ^= prevHash << 31;
        return hashValue;
    }

    static constexpr IndexType get_weight_index_scrambled(IndexType i) {
        return (i / ChunkSize) % (PaddedInputDimensions / ChunkSize) * OutputDimensions * ChunkSize
             + i / PaddedInputDimensions * ChunkSize + i % ChunkSize;
    }
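
    // Note (added commentary): on the SIMD code paths the weights are not kept in the natural
    // row-major order. get_weight_index_scrambled() permutes index i (row-major position
    // output * PaddedInputDimensions + input) so that the ChunkSize (= 4) weights belonging to
    // one 4-byte input block are stored consecutively for every output in turn. propagate()
    // can then, for each nonzero input block, read one contiguous column of
    // OutputDimensions * ChunkSize weights and feed it straight into the dpbusd-style
    // multiply-accumulate.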

    static constexpr IndexType get_weight_index(IndexType i) {
#if (USE_SSSE3 | (USE_NEON >= 8))
        return get_weight_index_scrambled(i);
#else
        return i;
#endif
    }

    // Read network parameters
    bool read_parameters(std::istream& stream) {
        read_little_endian<BiasType>(stream, biases, OutputDimensions);
        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

        return !stream.fail();
    }
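
    // Note (added commentary): read_parameters()/write_parameters() apply get_weight_index()
    // while streaming, so the on-disk network format stays plain row-major and identical
    // across architectures; only the in-memory layout depends on the build's SIMD support.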

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
        write_little_endian<BiasType>(stream, biases, OutputDimensions);

        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

        return !stream.fail();
    }

    std::size_t get_content_hash() const {
        std::size_t h = 0;
        hash_combine(h, get_raw_data_hash(biases));
        hash_combine(h, get_raw_data_hash(weights));
        hash_combine(h, get_hash_value(0));
        return h;
    }

    // Forward propagation
    void propagate(const InputType* input, OutputType* output) const {

#if (USE_SSSE3 | (USE_NEON >= 8))
#if defined(USE_AVX512)
        using invec_t  = __m512i;
        using outvec_t = __m512i;
#define vec_add_32 _mm512_add_epi32
#define vec_set_32 _mm512_set1_epi32
#define vec_add_dpbusd_32 SIMD::m512_add_dpbusd_epi32
#elif defined(USE_AVX2)
        using invec_t  = __m256i;
        using outvec_t = __m256i;
#define vec_add_32 _mm256_add_epi32
#define vec_set_32 _mm256_set1_epi32
#define vec_add_dpbusd_32 SIMD::m256_add_dpbusd_epi32
#elif defined(USE_SSSE3)
        using invec_t  = __m128i;
        using outvec_t = __m128i;
#define vec_set_32 _mm_set1_epi32
#define vec_add_dpbusd_32 SIMD::m128_add_dpbusd_epi32
#elif defined(USE_NEON_DOTPROD)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
#define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
#define vec_add_dpbusd_32 SIMD::dotprod_m128_add_dpbusd_epi32
#elif defined(USE_NEON)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
#define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
#define vec_add_dpbusd_32 SIMD::neon_m128_add_dpbusd_epi32
#endif
        constexpr IndexType OutputSimdWidth = sizeof(outvec_t) / sizeof(OutputType);
        constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / ChunkSize;
        constexpr IndexType NumAccums = OutputDimensions / OutputSimdWidth;
        // If we're using high-latency dot product instructions, split the accumulators
        // to create 3 separate dependency chains and merge at the end
        constexpr IndexType NumRegs =
#if defined(USE_VNNI)
          3 * NumAccums;
#else
          NumAccums;
#endif
        std::uint16_t nnz[NumChunks];
        IndexType     count;

        // Find indices of nonzero 32-bit blocks
        find_nnz<NumChunks>(input, nnz, count);

        const outvec_t* biasvec = reinterpret_cast<const outvec_t*>(biases);
        outvec_t        acc[NumRegs];
        for (IndexType k = 0; k < NumAccums; ++k)
            acc[k] = biasvec[k];

        const auto* start = nnz;
        const auto* end   = nnz + count;

        // convince GCC to not do weird pointer arithmetic in the following loop
        const std::int8_t* weights_cp = weights;
#if defined(USE_VNNI)
        for (IndexType k = NumAccums; k < NumRegs; ++k)
            acc[k] = vec_zero();

        while (start < end - 2)
        {
            const std::ptrdiff_t i0 = *start++;
            const std::ptrdiff_t i1 = *start++;
            const std::ptrdiff_t i2 = *start++;
            const invec_t in0 =
              vec_set_32(load_as<std::int32_t>(input + i0 * sizeof(std::int32_t)));
            const invec_t in1 =
              vec_set_32(load_as<std::int32_t>(input + i1 * sizeof(std::int32_t)));
            const invec_t in2 =
              vec_set_32(load_as<std::int32_t>(input + i2 * sizeof(std::int32_t)));
            const auto col0 =
              reinterpret_cast<const invec_t*>(&weights_cp[i0 * OutputDimensions * ChunkSize]);
            const auto col1 =
              reinterpret_cast<const invec_t*>(&weights_cp[i1 * OutputDimensions * ChunkSize]);
            const auto col2 =
              reinterpret_cast<const invec_t*>(&weights_cp[i2 * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumAccums; ++k)
            {
                vec_add_dpbusd_32(acc[k], in0, col0[k]);
                vec_add_dpbusd_32(acc[k + NumAccums], in1, col1[k]);
                vec_add_dpbusd_32(acc[k + 2 * NumAccums], in2, col2[k]);
            }
        }
        for (IndexType k = 0; k < NumAccums; ++k)
            acc[k] = vec_add_32(vec_add_32(acc[k], acc[k + NumAccums]), acc[k + 2 * NumAccums]);
#endif
        while (start < end)
        {
            const std::ptrdiff_t i = *start++;
            const invec_t in = vec_set_32(load_as<std::int32_t>(input + i * sizeof(std::int32_t)));
            const auto col =
              reinterpret_cast<const invec_t*>(&weights_cp[i * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumAccums; ++k)
                vec_add_dpbusd_32(acc[k], in, col[k]);
        }

        outvec_t* outptr = reinterpret_cast<outvec_t*>(output);
        for (IndexType k = 0; k < NumAccums; ++k)
            outptr[k] = acc[k];

#undef vec_set_32
#undef vec_add_dpbusd_32
#ifdef vec_add_32
#undef vec_add_32
#endif
#else
        // Use dense implementation for the other architectures.
        affine_transform_non_ssse3<InputDimensions, PaddedInputDimensions, OutputDimensions>(
          output, weights, biases, input);
#endif
    }

   private:
    using BiasType   = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};
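
/*
   Usage sketch (added commentary; the dimensions and buffer names are made up for
   illustration, not taken from the real network architecture):

       using Layer = AffineTransformSparseInput<1024, 16>;

       alignas(CacheLineSize) std::uint8_t        in[Layer::PaddedInputDimensions];  // clipped-ReLU output
       alignas(CacheLineSize) Layer::OutputBuffer out;
       Layer layer;

       // layer.read_parameters(stream);   // load biases/weights from a network file
       layer.propagate(in, out);           // out[o] = biases[o] + sum_i weights[o][i] * in[i]

   The input buffer must be padded to PaddedInputDimensions and suitably aligned for the
   SIMD loads used above.
*/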

}  // namespace Stockfish::Eval::NNUE::Layers

#endif  // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED