GitHub Repository: official-stockfish/Stockfish
Path: blob/master/src/nnue/layers/clipped_relu.h
/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2025 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer ClippedReLU of NNUE evaluation function

#ifndef NNUE_LAYERS_CLIPPED_RELU_H_INCLUDED
#define NNUE_LAYERS_CLIPPED_RELU_H_INCLUDED

#include <algorithm>
#include <cstdint>
#include <iosfwd>

#include "../nnue_common.h"

namespace Stockfish::Eval::NNUE::Layers {

// Clipped ReLU
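// Activation layer mapping each 32-bit input to an 8-bit output:
//   output[i] = clamp(input[i] >> WeightScaleBits, 0, 127)
// WeightScaleBits (defined in nnue_common.h) undoes the fixed-point scaling of
// the preceding affine layer. The SIMD paths in propagate() below are
// vectorized versions of this same scalar formula.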
template<IndexType InDims>
class ClippedReLU {
   public:
    // Input/output type
    using InputType  = std::int32_t;
    using OutputType = std::uint8_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions  = InDims;
    static constexpr IndexType OutputDimensions = InputDimensions;
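    // The output buffer is padded up to a multiple of 32 so that the SIMD
    // code can store whole registers and the following layer can read its
    // input in fixed-size blocks.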
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, 32);

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
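    // The constant is layer-specific; chaining it with the previous layer's
    // hash lets the loader verify that a network file matches this
    // architecture.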
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
        std::uint32_t hashValue = 0x538D24C7u;
        hashValue += prevHash;
        return hashValue;
    }

    // Read network parameters
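    // (ClippedReLU has no trainable parameters, so reading and writing are
    // no-ops that merely report success.)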
    bool read_parameters(std::istream&) { return true; }

    // Write network parameters
    bool write_parameters(std::ostream&) const { return true; }

    // Forward propagation
    void propagate(const InputType* input, OutputType* output) const {
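        // Each SIMD path below processes as many full chunks as it can and
        // sets Start to the first element it did not cover; the scalar loop
        // at the end finishes the remainder (and is the whole implementation
        // when no SIMD extension is available).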

#if defined(USE_AVX2)
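        // 256-bit path: each iteration consumes 4 registers of 8 int32s and
        // produces one register of 32 uint8s. The pack instructions saturate,
        // so the clamp to [0, 127] falls out of the narrowing itself; the
        // final permute is needed because _mm256_packs_epi16 interleaves its
        // results within 128-bit lanes.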
        if constexpr (InputDimensions % SimdWidth == 0)
        {
            constexpr IndexType NumChunks = InputDimensions / SimdWidth;
            const __m256i       Offsets   = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
            const auto          in        = reinterpret_cast<const __m256i*>(input);
            const auto          out       = reinterpret_cast<__m256i*>(output);
            for (IndexType i = 0; i < NumChunks; ++i)
            {
                const __m256i words0 =
                  _mm256_srli_epi16(_mm256_packus_epi32(_mm256_load_si256(&in[i * 4 + 0]),
                                                        _mm256_load_si256(&in[i * 4 + 1])),
                                    WeightScaleBits);
                const __m256i words1 =
                  _mm256_srli_epi16(_mm256_packus_epi32(_mm256_load_si256(&in[i * 4 + 2]),
                                                        _mm256_load_si256(&in[i * 4 + 3])),
                                    WeightScaleBits);
                _mm256_store_si256(&out[i], _mm256_permutevar8x32_epi32(
                                              _mm256_packs_epi16(words0, words1), Offsets));
            }
        }
        else
        {
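            // InputDimensions is only a multiple of SimdWidth / 2, so fall
            // back to 128-bit registers: 4 loads of 4 int32s yield 16 uint8s
            // per iteration. The 128-bit packs keep results in order, so no
            // permute is needed here.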
            constexpr IndexType NumChunks = InputDimensions / (SimdWidth / 2);
            const auto          in        = reinterpret_cast<const __m128i*>(input);
            const auto          out       = reinterpret_cast<__m128i*>(output);
            for (IndexType i = 0; i < NumChunks; ++i)
            {
                const __m128i words0 = _mm_srli_epi16(
                  _mm_packus_epi32(_mm_load_si128(&in[i * 4 + 0]), _mm_load_si128(&in[i * 4 + 1])),
                  WeightScaleBits);
                const __m128i words1 = _mm_srli_epi16(
                  _mm_packus_epi32(_mm_load_si128(&in[i * 4 + 2]), _mm_load_si128(&in[i * 4 + 3])),
                  WeightScaleBits);
                _mm_store_si128(&out[i], _mm_packs_epi16(words0, words1));
            }
        }
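        // First element not yet processed; which expression applies depends
        // on the branch taken above.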
        constexpr IndexType Start = InputDimensions % SimdWidth == 0
                                    ? InputDimensions / SimdWidth * SimdWidth
                                    : InputDimensions / (SimdWidth / 2) * (SimdWidth / 2);

#elif defined(USE_SSE2)
        constexpr IndexType NumChunks = InputDimensions / SimdWidth;

    #ifndef USE_SSE41
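        // Without SSE4.1 there is no unsigned pack (_mm_packus_epi32) and no
        // byte max (_mm_max_epi8). The fallback branch below packs with
        // signed saturation instead and clamps negatives to zero via
        // saturating add/sub of this 0x80 constant, which computes max(x, 0)
        // per byte.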
        const __m128i k0x80s = _mm_set1_epi8(-128);
    #endif

        const auto in  = reinterpret_cast<const __m128i*>(input);
        const auto out = reinterpret_cast<__m128i*>(output);
        for (IndexType i = 0; i < NumChunks; ++i)
        {
    #if defined(USE_SSE41)
            const __m128i words0 = _mm_srli_epi16(
              _mm_packus_epi32(_mm_load_si128(&in[i * 4 + 0]), _mm_load_si128(&in[i * 4 + 1])),
              WeightScaleBits);
            const __m128i words1 = _mm_srli_epi16(
              _mm_packus_epi32(_mm_load_si128(&in[i * 4 + 2]), _mm_load_si128(&in[i * 4 + 3])),
              WeightScaleBits);
            _mm_store_si128(&out[i], _mm_packs_epi16(words0, words1));
    #else
            const __m128i words0 = _mm_srai_epi16(
              _mm_packs_epi32(_mm_load_si128(&in[i * 4 + 0]), _mm_load_si128(&in[i * 4 + 1])),
              WeightScaleBits);
            const __m128i words1 = _mm_srai_epi16(
              _mm_packs_epi32(_mm_load_si128(&in[i * 4 + 2]), _mm_load_si128(&in[i * 4 + 3])),
              WeightScaleBits);
            const __m128i packedbytes = _mm_packs_epi16(words0, words1);
            _mm_store_si128(&out[i], _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s));
    #endif
        }
        constexpr IndexType Start = NumChunks * SimdWidth;

#elif defined(USE_NEON)
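        // NEON path: vqshrn_n_s32 fuses the right shift with a saturating
        // 32 -> 16 bit narrowing, vqmovn_s16 narrows to signed bytes with
        // saturation at 127, and vmax_s8 against zero supplies the lower
        // clamp of the ReLU.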
        constexpr IndexType NumChunks = InputDimensions / (SimdWidth / 2);
        const int8x8_t      Zero      = {0};
        const auto          in        = reinterpret_cast<const int32x4_t*>(input);
        const auto          out       = reinterpret_cast<int8x8_t*>(output);
        for (IndexType i = 0; i < NumChunks; ++i)
        {
            int16x8_t  shifted;
            const auto pack = reinterpret_cast<int16x4_t*>(&shifted);
            pack[0]         = vqshrn_n_s32(in[i * 2 + 0], WeightScaleBits);
            pack[1]         = vqshrn_n_s32(in[i * 2 + 1], WeightScaleBits);
            out[i]          = vmax_s8(vqmovn_s16(shifted), Zero);
        }
        constexpr IndexType Start = NumChunks * (SimdWidth / 2);
#else
        constexpr IndexType Start = 0;
#endif
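
        // Scalar tail: reference implementation of the layer, covering any
        // elements the SIMD chunks above did not reach.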
        for (IndexType i = Start; i < InputDimensions; ++i)
        {
            output[i] = static_cast<OutputType>(std::clamp(input[i] >> WeightScaleBits, 0, 127));
        }
    }
};

}  // namespace Stockfish::Eval::NNUE::Layers

#endif  // NNUE_LAYERS_CLIPPED_RELU_H_INCLUDED
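
// Illustrative usage sketch (not part of the upstream header; the dimension
// below is hypothetical). Buffers must be SIMD-aligned, since propagate()
// uses aligned loads and stores:
//
//     using Act = Stockfish::Eval::NNUE::Layers::ClippedReLU<32>;
//     alignas(64) std::int32_t      in[32] = { /* affine-layer outputs */ };
//     alignas(64) Act::OutputBuffer out;
//     Act().propagate(in, out);
//     // now out[i] == clamp(in[i] >> WeightScaleBits, 0, 127)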