GitHub Repository: official-stockfish/stockfish
Path: blob/master/src/nnue/layers/affine_transform_sparse_input.h
/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2025 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransformSparseInput of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>

#include "../../bitboard.h"
#include "../simd.h"
#include "../nnue_common.h"

/*
   This file contains the definition for a fully connected layer (aka affine transform) with
   block sparse input.
*/

namespace Stockfish::Eval::NNUE::Layers {

#if (USE_SSSE3 | (USE_NEON >= 8))
static constexpr int lsb_index64[64] = {
  0, 47, 1, 56, 48, 27, 2, 60, 57, 49, 41, 37, 28, 16, 3, 61, 54, 58, 35, 52, 50, 42,
  21, 44, 38, 32, 29, 23, 17, 11, 4, 62, 46, 55, 26, 59, 40, 36, 15, 53, 34, 51, 20, 43,
  31, 22, 10, 45, 25, 39, 14, 33, 19, 30, 9, 24, 13, 18, 8, 12, 7, 6, 5, 63};

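// Compile-time bit scan: (bb ^ (bb - 1)) sets all bits up to and including the lowest
// set bit of bb, and multiplying by the De Bruijn constant maps that pattern to a unique
// 6-bit value in the top bits, which indexes lsb_index64[]. For example,
// constexpr_lsb(0b1010) == 1, since bit 1 is the lowest set bit.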
constexpr int constexpr_lsb(uint64_t bb) {
    assert(bb != 0);
    constexpr uint64_t debruijn64 = 0x03F79D71B4CB0A89ULL;
    return lsb_index64[((bb ^ (bb - 1)) * debruijn64) >> 58];
}

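// Precomputed table: for every 8-bit mask, offset_indices[mask] holds the positions of
// its set bits, packed to the front and zero-padded to 8 entries. find_nnz() uses one
// 128-bit load per mask byte to turn a nonzero-bitmask into output indices, e.g.
// offset_indices[0b00100110] == {1, 2, 5, 0, 0, 0, 0, 0}.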
alignas(CacheLineSize) static constexpr struct OffsetIndices {

    std::uint16_t offset_indices[256][8];

    constexpr OffsetIndices() :
        offset_indices() {
        for (int i = 0; i < 256; ++i)
        {
            std::uint64_t j = i, k = 0;
            while (j)
            {
                offset_indices[i][k++] = constexpr_lsb(j);
                j &= j - 1;
            }
            while (k < 8)
                offset_indices[i][k++] = 0;
        }
    }

} Lookup;

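// RESTRICT promises the compiler that the annotated pointers do not alias,
// which allows more aggressive optimization of the loops below.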
#if defined(__GNUC__) || defined(__clang__)
    #define RESTRICT __restrict__
#elif defined(_MSC_VER)
    #define RESTRICT __restrict
#else
    #define RESTRICT
#endif

// Find the indices of nonzero numbers in an int32_t array: the indices are written
// to out and their count to count_out.
template<const IndexType InputDimensions>
void find_nnz(const std::int32_t* RESTRICT input,
              std::uint16_t* RESTRICT      out,
              IndexType&                   count_out) {

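    // All branches follow the same pattern: build a bitmask with one bit per nonzero
    // 32-bit lane, pack the indices of the set bits contiguously into out (either with
    // a hardware compress instruction or via the Lookup table), and advance count by
    // the popcount of the mask.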
#if defined(USE_AVX512ICL)

    constexpr IndexType SimdWidthIn  = 16;  // 512 bits / 32 bits
    constexpr IndexType SimdWidthOut = 32;  // 512 bits / 16 bits
    constexpr IndexType NumChunks    = InputDimensions / SimdWidthOut;
    const __m512i increment = _mm512_set1_epi16(SimdWidthOut);
    __m512i       base      = _mm512_set_epi16(  // Same permute order as _mm512_packus_epi32()
      31, 30, 29, 28, 15, 14, 13, 12, 27, 26, 25, 24, 11, 10, 9, 8, 23, 22, 21, 20, 7, 6, 5, 4, 19,
      18, 17, 16, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV0 = _mm512_load_si512(input + i * 2 * SimdWidthIn);
        const __m512i inputV1 = _mm512_load_si512(input + i * 2 * SimdWidthIn + SimdWidthIn);

        // Get a bitmask and gather non zero indices
        const __m512i   inputV01 = _mm512_packus_epi32(inputV0, inputV1);
        const __mmask32 nnzMask  = _mm512_test_epi16_mask(inputV01, inputV01);

        // Avoid _mm512_mask_compressstoreu_epi16() as it's 256 uOps on Zen4
        __m512i nnz = _mm512_maskz_compress_epi16(nnzMask, base);
        _mm512_storeu_si512(out + count, nnz);

        count += popcount(nnzMask);
        base = _mm512_add_epi16(base, increment);
    }
    count_out = count;

#elif defined(USE_AVX512)

    constexpr IndexType SimdWidth = 16;  // 512 bits / 32 bits
    constexpr IndexType NumChunks = InputDimensions / SimdWidth;
    const __m512i increment = _mm512_set1_epi32(SimdWidth);
    __m512i       base      = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV = _mm512_load_si512(input + i * SimdWidth);

        // Get a bitmask and gather non zero indices
        const __mmask16 nnzMask = _mm512_test_epi32_mask(inputV, inputV);
        const __m512i   nnzV    = _mm512_maskz_compress_epi32(nnzMask, base);
        _mm512_mask_cvtepi32_storeu_epi16(out + count, 0xFFFF, nnzV);
        count += popcount(nnzMask);
        base = _mm512_add_epi32(base, increment);
    }
    count_out = count;

#else

    using namespace SIMD;

    constexpr IndexType InputSimdWidth = sizeof(vec_uint_t) / sizeof(std::int32_t);
    // Inputs are processed InputSimdWidth at a time and outputs are processed 8 at a time
    // so we process in chunks of max(InputSimdWidth, 8)
    constexpr IndexType ChunkSize       = std::max<IndexType>(InputSimdWidth, 8);
    constexpr IndexType NumChunks       = InputDimensions / ChunkSize;
    constexpr IndexType InputsPerChunk  = ChunkSize / InputSimdWidth;
    constexpr IndexType OutputsPerChunk = ChunkSize / 8;

    const auto     inputVector = reinterpret_cast<const vec_uint_t*>(input);
    IndexType      count       = 0;
    vec128_t       base        = vec128_zero;
    const vec128_t increment   = vec128_set_16(8);
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        // bitmask of nonzero values in this chunk
        unsigned nnz = 0;
        for (IndexType j = 0; j < InputsPerChunk; ++j)
        {
            const vec_uint_t inputChunk = inputVector[i * InputsPerChunk + j];
            nnz |= unsigned(vec_nnz(inputChunk)) << (j * InputSimdWidth);
        }
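        // Each byte of nnz indexes the Lookup table, which yields the positions of its
        // set bits; adding the running base converts them into absolute block indices.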
        for (IndexType j = 0; j < OutputsPerChunk; ++j)
        {
            const unsigned lookup = (nnz >> (j * 8)) & 0xFF;
            const vec128_t offsets =
              vec128_load(reinterpret_cast<const vec128_t*>(&Lookup.offset_indices[lookup]));
            vec128_storeu(reinterpret_cast<vec128_t*>(out + count), vec128_add(base, offsets));
            count += popcount(lookup);
            base = vec128_add(base, increment);
        }
    }
    count_out = count;
#endif
}

#endif

// Sparse input implementation
template<IndexType InDims, IndexType OutDims>
class AffineTransformSparseInput {
   public:
    // Input/output type
    using InputType  = std::uint8_t;
    using OutputType = std::int32_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions  = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static_assert(OutputDimensions % 16 == 0,
                  "Only implemented for OutputDimensions divisible by 16.");

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

#if (USE_SSSE3 | (USE_NEON >= 8))
    static constexpr IndexType ChunkSize = 4;
#else
    static constexpr IndexType ChunkSize = 1;
#endif

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
        std::uint32_t hashValue = 0xCC03DAE4u;
        hashValue += OutputDimensions;
        hashValue ^= prevHash >> 1;
        hashValue ^= prevHash << 31;
        return hashValue;
    }

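    // The weights are stored in a scrambled order: instead of one row per output, they
    // are grouped into blocks of ChunkSize (4) input columns, so that the weights needed
    // for one nonzero 4-byte input block form a single contiguous run of
    // OutputDimensions * ChunkSize bytes that propagate() can stream through.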
    static constexpr IndexType get_weight_index_scrambled(IndexType i) {
        return (i / ChunkSize) % (PaddedInputDimensions / ChunkSize) * OutputDimensions * ChunkSize
             + i / PaddedInputDimensions * ChunkSize + i % ChunkSize;
    }

    static constexpr IndexType get_weight_index(IndexType i) {
#if (USE_SSSE3 | (USE_NEON >= 8))
        return get_weight_index_scrambled(i);
#else
        return i;
#endif
    }

    // Read network parameters
    bool read_parameters(std::istream& stream) {
        read_little_endian<BiasType>(stream, biases, OutputDimensions);
        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

        return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
        write_little_endian<BiasType>(stream, biases, OutputDimensions);

        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

        return !stream.fail();
    }

    // Forward propagation
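    // The input is reinterpreted as blocks of 4 uint8 activations (one int32 each).
    // find_nnz() collects the indices of nonzero blocks; for every such block its four
    // bytes are broadcast across a SIMD register and multiplied against the matching
    // contiguous weight column with a dpbusd-style (u8 x i8 -> i32) multiply-add, so
    // only the nonzero part of the input contributes any work.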
    void propagate(const InputType* input, OutputType* output) const {

#if (USE_SSSE3 | (USE_NEON >= 8))
    #if defined(USE_AVX512)
        using invec_t  = __m512i;
        using outvec_t = __m512i;
        #define vec_add_32 _mm512_add_epi32
        #define vec_set_32 _mm512_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m512_add_dpbusd_epi32
    #elif defined(USE_AVX2)
        using invec_t  = __m256i;
        using outvec_t = __m256i;
        #define vec_add_32 _mm256_add_epi32
        #define vec_set_32 _mm256_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m256_add_dpbusd_epi32
    #elif defined(USE_SSSE3)
        using invec_t  = __m128i;
        using outvec_t = __m128i;
        #define vec_set_32 _mm_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m128_add_dpbusd_epi32
    #elif defined(USE_NEON_DOTPROD)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::dotprod_m128_add_dpbusd_epi32
    #elif defined(USE_NEON)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::neon_m128_add_dpbusd_epi32
    #endif
        constexpr IndexType OutputSimdWidth = sizeof(outvec_t) / sizeof(OutputType);
        constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / ChunkSize;
        constexpr IndexType NumAccums = OutputDimensions / OutputSimdWidth;
        // If we're using high-latency dot product instructions, split the accumulators
        // to create 3 separate dependency chains and merge at the end
        constexpr IndexType NumRegs =
    #if defined(USE_VNNI)
          3 * NumAccums;
    #else
          NumAccums;
    #endif
        std::uint16_t nnz[NumChunks];
        IndexType     count;

        const auto input32 = reinterpret_cast<const std::int32_t*>(input);

        // Find indices of nonzero 32-bit blocks
        find_nnz<NumChunks>(input32, nnz, count);

        const outvec_t* biasvec = reinterpret_cast<const outvec_t*>(biases);
        outvec_t        acc[NumRegs];
        for (IndexType k = 0; k < NumAccums; ++k)
            acc[k] = biasvec[k];

        const auto* start = nnz;
        const auto* end   = nnz + count;

        // convince GCC to not do weird pointer arithmetic in the following loop
        const std::int8_t* weights_cp = weights;
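        // Each nonzero index selects one 4-byte input block (broadcast to every lane)
        // and one contiguous column of OutputDimensions * ChunkSize int8 weights;
        // vec_add_dpbusd_32 accumulates their products into 32-bit lanes. With VNNI the
        // loop is unrolled by 3 to hide the latency of the dot-product instruction.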
    #if defined(USE_VNNI)
        for (IndexType k = NumAccums; k < NumRegs; ++k)
            acc[k] = vec_zero();

        while (start < end - 2)
        {
            const std::ptrdiff_t i0  = *start++;
            const std::ptrdiff_t i1  = *start++;
            const std::ptrdiff_t i2  = *start++;
            const invec_t        in0 = vec_set_32(input32[i0]);
            const invec_t        in1 = vec_set_32(input32[i1]);
            const invec_t        in2 = vec_set_32(input32[i2]);
            const auto           col0 =
              reinterpret_cast<const invec_t*>(&weights_cp[i0 * OutputDimensions * ChunkSize]);
            const auto col1 =
              reinterpret_cast<const invec_t*>(&weights_cp[i1 * OutputDimensions * ChunkSize]);
            const auto col2 =
              reinterpret_cast<const invec_t*>(&weights_cp[i2 * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumAccums; ++k)
            {
                vec_add_dpbusd_32(acc[k], in0, col0[k]);
                vec_add_dpbusd_32(acc[k + NumAccums], in1, col1[k]);
                vec_add_dpbusd_32(acc[k + 2 * NumAccums], in2, col2[k]);
            }
        }
        for (IndexType k = 0; k < NumAccums; ++k)
            acc[k] = vec_add_32(vec_add_32(acc[k], acc[k + NumAccums]), acc[k + 2 * NumAccums]);
    #endif
        while (start < end)
        {
            const std::ptrdiff_t i  = *start++;
            const invec_t        in = vec_set_32(input32[i]);
            const auto           col =
              reinterpret_cast<const invec_t*>(&weights_cp[i * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumAccums; ++k)
                vec_add_dpbusd_32(acc[k], in, col[k]);
        }

        outvec_t* outptr = reinterpret_cast<outvec_t*>(output);
        for (IndexType k = 0; k < NumAccums; ++k)
            outptr[k] = acc[k];

    #undef vec_set_32
    #undef vec_add_dpbusd_32
    #ifdef vec_add_32
        #undef vec_add_32
    #endif
#else
        // Use dense implementation for the other architectures.
        affine_transform_non_ssse3<InputDimensions, PaddedInputDimensions, OutputDimensions>(
          output, weights, biases, input);
#endif
    }

   private:
    using BiasType   = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};

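// Usage sketch (illustrative dimensions, not taken from any particular network
// architecture): propagate() computes output[k] = biases[k] + sum_j weights[k][j] * input[j].
//
//   using Layer = AffineTransformSparseInput<512, 32>;   // hypothetical InDims/OutDims
//   Layer layer;
//   layer.read_parameters(stream);                       // stream positioned at this layer
//   alignas(CacheLineSize) Layer::InputType    in[Layer::PaddedInputDimensions] = {};
//   alignas(CacheLineSize) Layer::OutputBuffer out;
//   layer.propagate(in, out);
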
}  // namespace Stockfish::Eval::NNUE::Layers

#endif  // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED