/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2025 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransformSparseInput of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED

#include <algorithm>
#include <cstdint>
#include <iostream>

#include "../../bitboard.h"
#include "../simd.h"
#include "../nnue_common.h"

/*
   This file contains the definition of a fully connected layer (aka affine transform)
   with block-sparse input.
*/

namespace Stockfish::Eval::NNUE::Layers {

#if (USE_SSSE3 | (USE_NEON >= 8))
static constexpr int lsb_index64[64] = {
  0,  47, 1,  56, 48, 27, 2,  60, 57, 49, 41, 37, 28, 16, 3,  61, 54, 58, 35, 52, 50, 42,
  21, 44, 38, 32, 29, 23, 17, 11, 4,  62, 46, 55, 26, 59, 40, 36, 15, 53, 34, 51, 20, 43,
  31, 22, 10, 45, 25, 39, 14, 33, 19, 30, 9,  24, 13, 18, 8,  12, 7,  6,  5,  63};

constexpr int constexpr_lsb(uint64_t bb) {
    assert(bb != 0);
    constexpr uint64_t debruijn64 = 0x03F79D71B4CB0A89ULL;
    return lsb_index64[((bb ^ (bb - 1)) * debruijn64) >> 58];
}
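
// constexpr_lsb() is a compile-time De Bruijn bit scan: bb ^ (bb - 1) turns on every bit
// up to and including the least significant set bit, multiplying by debruijn64 slides a
// unique 6-bit pattern into the top bits for each possible LSB position, and >> 58
// extracts it as an index into lsb_index64[]. E.g. bb = 0b1000 yields 3.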

alignas(CacheLineSize) static constexpr struct OffsetIndices {

    std::uint16_t offset_indices[256][8];

    constexpr OffsetIndices() :
        offset_indices() {
        for (int i = 0; i < 256; ++i)
        {
            std::uint64_t j = i, k = 0;
            while (j)
            {
                offset_indices[i][k++] = constexpr_lsb(j);
                j &= j - 1;
            }
            while (k < 8)
                offset_indices[i][k++] = 0;
        }
    }

} Lookup;
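
// Lookup.offset_indices[b] lists, in ascending order, the positions of the set bits of
// the byte b, zero-padded to 8 entries. For example, b = 0b00101001 gives
// {0, 3, 5, 0, 0, 0, 0, 0}. find_nnz() below loads one such row with a single 128-bit
// load and adds a running base offset to turn per-byte bit positions into input indices.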

#if defined(__GNUC__) || defined(__clang__)
    #define RESTRICT __restrict__
#elif defined(_MSC_VER)
    #define RESTRICT __restrict
#else
    #define RESTRICT
#endif

// Find indices of nonzero numbers in an int32_t array
template<const IndexType InputDimensions>
void find_nnz(const std::int32_t* RESTRICT input,
              std::uint16_t* RESTRICT      out,
              IndexType&                   count_out) {

#if defined(USE_AVX512ICL)

    constexpr IndexType SimdWidthIn  = 16;  // 512 bits / 32 bits
    constexpr IndexType SimdWidthOut = 32;  // 512 bits / 16 bits
    constexpr IndexType NumChunks    = InputDimensions / SimdWidthOut;
    const __m512i       increment    = _mm512_set1_epi16(SimdWidthOut);
    __m512i base = _mm512_set_epi16(  // Same permute order as _mm512_packus_epi32()
      31, 30, 29, 28, 15, 14, 13, 12, 27, 26, 25, 24, 11, 10, 9, 8, 23, 22, 21, 20, 7, 6, 5, 4,
      19, 18, 17, 16, 3, 2, 1, 0);
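    // base is kept in that packed order because _mm512_packus_epi32() interleaves its two
    // sources 128-bit lane by lane, so the 16-bit elements of inputV01 below are not in
    // linear order; compressing this pre-permuted base by the nonzero mask then yields
    // correct input indices without an extra permute.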

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV0 = _mm512_load_si512(input + i * 2 * SimdWidthIn);
        const __m512i inputV1 = _mm512_load_si512(input + i * 2 * SimdWidthIn + SimdWidthIn);

        // Get a bitmask and gather nonzero indices
        const __m512i   inputV01 = _mm512_packus_epi32(inputV0, inputV1);
        const __mmask32 nnzMask  = _mm512_test_epi16_mask(inputV01, inputV01);

        // Avoid _mm512_mask_compressstoreu_epi16() as it's 256 uOps on Zen4
        __m512i nnz = _mm512_maskz_compress_epi16(nnzMask, base);
        _mm512_storeu_si512(out + count, nnz);
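        // The full vector is stored, but only the low popcount(nnzMask) entries are
        // meaningful; later iterations store at the advanced count and overwrite the rest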

        count += popcount(nnzMask);
        base = _mm512_add_epi16(base, increment);
    }
    count_out = count;

#elif defined(USE_AVX512)

    constexpr IndexType SimdWidth = 16;  // 512 bits / 32 bits
    constexpr IndexType NumChunks = InputDimensions / SimdWidth;
    const __m512i       increment = _mm512_set1_epi32(SimdWidth);
    __m512i base = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV = _mm512_load_si512(input + i * SimdWidth);

        // Get a bitmask and gather nonzero indices
        const __mmask16 nnzMask = _mm512_test_epi32_mask(inputV, inputV);
        const __m512i   nnzV    = _mm512_maskz_compress_epi32(nnzMask, base);
        _mm512_mask_cvtepi32_storeu_epi16(out + count, 0xFFFF, nnzV);
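        // Narrow the 32-bit indices to 16 bits and store all 16 lanes; as above, only the
        // low popcount(nnzMask) entries are valid and the rest are overwritten later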
        count += popcount(nnzMask);
        base = _mm512_add_epi32(base, increment);
    }
    count_out = count;

#else

    using namespace SIMD;

    constexpr IndexType InputSimdWidth = sizeof(vec_uint_t) / sizeof(std::int32_t);
    // Inputs are processed InputSimdWidth at a time, and outputs are processed 8 at a
    // time, so we process in chunks of max(InputSimdWidth, 8)
    constexpr IndexType ChunkSize       = std::max<IndexType>(InputSimdWidth, 8);
    constexpr IndexType NumChunks       = InputDimensions / ChunkSize;
    constexpr IndexType InputsPerChunk  = ChunkSize / InputSimdWidth;
    constexpr IndexType OutputsPerChunk = ChunkSize / 8;

    const auto     inputVector = reinterpret_cast<const vec_uint_t*>(input);
    IndexType      count       = 0;
    vec128_t       base        = vec128_zero;
    const vec128_t increment   = vec128_set_16(8);
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        // bitmask of nonzero values in this chunk
        unsigned nnz = 0;
        for (IndexType j = 0; j < InputsPerChunk; ++j)
        {
            const vec_uint_t inputChunk = inputVector[i * InputsPerChunk + j];
            nnz |= unsigned(vec_nnz(inputChunk)) << (j * InputSimdWidth);
        }
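        // Each byte of nnz now covers 8 inputs; expand it into explicit indices with the
        // precomputed Lookup table, offset by the running base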
        for (IndexType j = 0; j < OutputsPerChunk; ++j)
        {
            const unsigned lookup = (nnz >> (j * 8)) & 0xFF;
            const vec128_t offsets =
              vec128_load(reinterpret_cast<const vec128_t*>(&Lookup.offset_indices[lookup]));
            vec128_storeu(reinterpret_cast<vec128_t*>(out + count), vec128_add(base, offsets));
            count += popcount(lookup);
            base = vec128_add(base, increment);
        }
    }
    count_out = count;
#endif
}

#endif
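
// For reference, find_nnz() above is semantically equivalent to this scalar sketch
// (illustrative only, not part of the build):
//
//     IndexType count = 0;
//     for (IndexType i = 0; i < InputDimensions; ++i)
//         if (input[i] != 0)
//             out[count++] = std::uint16_t(i);
//     count_out = count;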

// Sparse input implementation
template<IndexType InDims, IndexType OutDims>
class AffineTransformSparseInput {
   public:
    // Input/output type
    using InputType  = std::uint8_t;
    using OutputType = std::int32_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions  = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static_assert(OutputDimensions % 16 == 0,
                  "Only implemented for OutputDimensions divisible by 16.");

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

#if (USE_SSSE3 | (USE_NEON >= 8))
    static constexpr IndexType ChunkSize = 4;
#else
    static constexpr IndexType ChunkSize = 1;
#endif

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
        std::uint32_t hashValue = 0xCC03DAE4u;
        hashValue += OutputDimensions;
        hashValue ^= prevHash >> 1;
        hashValue ^= prevHash << 31;
        return hashValue;
    }

    static constexpr IndexType get_weight_index_scrambled(IndexType i) {
        return (i / ChunkSize) % (PaddedInputDimensions / ChunkSize) * OutputDimensions * ChunkSize
             + i / PaddedInputDimensions * ChunkSize + i % ChunkSize;
    }
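
    // get_weight_index_scrambled() maps the stream's row-major weight w[o][c] to
    // (c / ChunkSize) * OutputDimensions * ChunkSize + o * ChunkSize + (c % ChunkSize),
    // so for each chunk of 4 inputs the 4 weights of every output form one contiguous
    // 32-bit group. propagate() can then broadcast a nonzero 4-byte input chunk and
    // multiply-accumulate it against a single contiguous block of columns.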

    static constexpr IndexType get_weight_index(IndexType i) {
#if (USE_SSSE3 | (USE_NEON >= 8))
        return get_weight_index_scrambled(i);
#else
        return i;
#endif
    }

    // Read network parameters
    bool read_parameters(std::istream& stream) {
        read_little_endian<BiasType>(stream, biases, OutputDimensions);
        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

        return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
        write_little_endian<BiasType>(stream, biases, OutputDimensions);

        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

        return !stream.fail();
    }

    // Forward propagation
    void propagate(const InputType* input, OutputType* output) const {

#if (USE_SSSE3 | (USE_NEON >= 8))
    #if defined(USE_AVX512)
        using invec_t  = __m512i;
        using outvec_t = __m512i;
        #define vec_set_32 _mm512_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m512_add_dpbusd_epi32
    #elif defined(USE_AVX2)
        using invec_t  = __m256i;
        using outvec_t = __m256i;
        #define vec_set_32 _mm256_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m256_add_dpbusd_epi32
    #elif defined(USE_SSSE3)
        using invec_t  = __m128i;
        using outvec_t = __m128i;
        #define vec_set_32 _mm_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m128_add_dpbusd_epi32
    #elif defined(USE_NEON_DOTPROD)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::dotprod_m128_add_dpbusd_epi32
    #elif defined(USE_NEON)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::neon_m128_add_dpbusd_epi32
    #endif
        static constexpr IndexType OutputSimdWidth = sizeof(outvec_t) / sizeof(OutputType);

        constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / ChunkSize;
        constexpr IndexType NumRegs   = OutputDimensions / OutputSimdWidth;
        std::uint16_t       nnz[NumChunks];
        IndexType           count;

        const auto input32 = reinterpret_cast<const std::int32_t*>(input);

        // Find indices of nonzero 32-bit blocks
        find_nnz<NumChunks>(input32, nnz, count);
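
        // Only the weight columns for those nonzero chunks are visited below; the
        // incoming clipped activations are mostly zero in practice, so this skips the
        // bulk of the dense matrix-vector work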

        const outvec_t* biasvec = reinterpret_cast<const outvec_t*>(biases);
        outvec_t        acc[NumRegs];
        for (IndexType k = 0; k < NumRegs; ++k)
            acc[k] = biasvec[k];

        for (IndexType j = 0; j < count; ++j)
        {
            const auto    i  = nnz[j];
            const invec_t in = vec_set_32(input32[i]);
            const auto    col =
              reinterpret_cast<const invec_t*>(&weights[i * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumRegs; ++k)
                vec_add_dpbusd_32(acc[k], in, col[k]);
        }

        outvec_t* outptr = reinterpret_cast<outvec_t*>(output);
        for (IndexType k = 0; k < NumRegs; ++k)
            outptr[k] = acc[k];
    #undef vec_set_32
    #undef vec_add_dpbusd_32
#else
        // Use dense implementation for the other architectures.
        affine_transform_non_ssse3<InputDimensions, PaddedInputDimensions, OutputDimensions>(
          output, weights, biases, input);
#endif
    }

   private:
    using BiasType   = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType   biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};
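
// Minimal usage sketch (illustrative dimensions, not the engine's actual network
// configuration; buffers must satisfy the alignment the SIMD loads expect):
//
//     using Layer = AffineTransformSparseInput<512, 16>;
//
//     Layer layer;                 // parameters filled via layer.read_parameters(stream)
//     alignas(CacheLineSize) Layer::InputType    in[Layer::PaddedInputDimensions] = {};
//     alignas(CacheLineSize) Layer::OutputBuffer out;
//     layer.propagate(in, out);    // out[k] = biases[k] + sum_i W[k][i] * in[i]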

}  // namespace Stockfish::Eval::NNUE::Layers

#endif  // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED