Path: blob/master/src/nnue/layers/affine_transform_sparse_input.h
/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2025 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransformSparseInput of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED

#include <algorithm>
#include <cstdint>
#include <iostream>

#include "../../bitboard.h"
#include "../simd.h"
#include "../nnue_common.h"

/*
   This file contains the definition for a fully connected layer (aka affine transform)
   with block sparse input.
*/

namespace Stockfish::Eval::NNUE::Layers {

#if (USE_SSSE3 | (USE_NEON >= 8))

static constexpr int lsb_index64[64] = {
  0,  47, 1,  56, 48, 27, 2,  60, 57, 49, 41, 37, 28, 16, 3,  61,
  54, 58, 35, 52, 50, 42, 21, 44, 38, 32, 29, 23, 17, 11, 4,  62,
  46, 55, 26, 59, 40, 36, 15, 53, 34, 51, 20, 43, 31, 22, 10, 45,
  25, 39, 14, 33, 19, 30, 9,  24, 13, 18, 8,  12, 7,  6,  5,  63};

constexpr int constexpr_lsb(uint64_t bb) {
    assert(bb != 0);
    constexpr uint64_t debruijn64 = 0x03F79D71B4CB0A89ULL;
    return lsb_index64[((bb ^ (bb - 1)) * debruijn64) >> 58];
}

alignas(CacheLineSize) static constexpr struct OffsetIndices {

    std::uint16_t offset_indices[256][8];

    constexpr OffsetIndices() :
        offset_indices() {
        for (int i = 0; i < 256; ++i)
        {
            std::uint64_t j = i, k = 0;
            while (j)
            {
                offset_indices[i][k++] = constexpr_lsb(j);
                j &= j - 1;
            }
            while (k < 8)
                offset_indices[i][k++] = 0;
        }
    }

} Lookup;
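
// Illustrative compile-time spot check: for the byte 0b00010110, bits 1, 2 and 4
// are set, so the table lists those positions in ascending order and pads the
// remaining entries with zeros.
static_assert(Lookup.offset_indices[0b00010110][0] == 1
                && Lookup.offset_indices[0b00010110][1] == 2
                && Lookup.offset_indices[0b00010110][2] == 4
                && Lookup.offset_indices[0b00010110][3] == 0,
              "offset_indices lists set-bit positions in ascending order, zero-padded");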

#if defined(__GNUC__) || defined(__clang__)
    #define RESTRICT __restrict__
#elif defined(_MSC_VER)
    #define RESTRICT __restrict
#else
    #define RESTRICT
#endif

// Find indices of nonzero numbers in an int32_t array
template<const IndexType InputDimensions>
void find_nnz(const std::int32_t* RESTRICT input,
              std::uint16_t* RESTRICT      out,
              IndexType&                   count_out) {

#if defined(USE_AVX512ICL)

    constexpr IndexType SimdWidthIn  = 16;  // 512 bits / 32 bits
    constexpr IndexType SimdWidthOut = 32;  // 512 bits / 16 bits
    constexpr IndexType NumChunks    = InputDimensions / SimdWidthOut;
    const __m512i       increment    = _mm512_set1_epi16(SimdWidthOut);
    // Same permute order as _mm512_packus_epi32()
    __m512i base = _mm512_set_epi16(31, 30, 29, 28, 15, 14, 13, 12, 27, 26, 25, 24, 11, 10, 9, 8,
                                    23, 22, 21, 20, 7, 6, 5, 4, 19, 18, 17, 16, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV0 = _mm512_load_si512(input + i * 2 * SimdWidthIn);
        const __m512i inputV1 = _mm512_load_si512(input + i * 2 * SimdWidthIn + SimdWidthIn);

        // Get a bitmask and gather non zero indices
        const __m512i   inputV01 = _mm512_packus_epi32(inputV0, inputV1);
        const __mmask32 nnzMask  = _mm512_test_epi16_mask(inputV01, inputV01);

        // Avoid _mm512_mask_compressstoreu_epi16() as it's 256 uOps on Zen4
        __m512i nnz = _mm512_maskz_compress_epi16(nnzMask, base);
        _mm512_storeu_si512(out + count, nnz);

        count += popcount(nnzMask);
        base = _mm512_add_epi16(base, increment);
    }
    count_out = count;

#elif defined(USE_AVX512)

    constexpr IndexType SimdWidth = 16;  // 512 bits / 32 bits
    constexpr IndexType NumChunks = InputDimensions / SimdWidth;
    const __m512i       increment = _mm512_set1_epi32(SimdWidth);
    __m512i base = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV = _mm512_load_si512(input + i * SimdWidth);

        // Get a bitmask and gather non zero indices
        const __mmask16 nnzMask = _mm512_test_epi32_mask(inputV, inputV);
        const __m512i   nnzV    = _mm512_maskz_compress_epi32(nnzMask, base);
        _mm512_mask_cvtepi32_storeu_epi16(out + count, 0xFFFF, nnzV);
        count += popcount(nnzMask);
        base = _mm512_add_epi32(base, increment);
    }
    count_out = count;

#else

    using namespace SIMD;

    constexpr IndexType InputSimdWidth = sizeof(vec_uint_t) / sizeof(std::int32_t);
    // Inputs are processed InputSimdWidth at a time and outputs are processed 8 at a time
    // so we process in chunks of max(InputSimdWidth, 8)
    constexpr IndexType ChunkSize       = std::max<IndexType>(InputSimdWidth, 8);
    constexpr IndexType NumChunks       = InputDimensions / ChunkSize;
    constexpr IndexType InputsPerChunk  = ChunkSize / InputSimdWidth;
    constexpr IndexType OutputsPerChunk = ChunkSize / 8;

    const auto     inputVector = reinterpret_cast<const vec_uint_t*>(input);
    IndexType      count       = 0;
    vec128_t       base        = vec128_zero;
    const vec128_t increment   = vec128_set_16(8);
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        // bitmask of nonzero values in this chunk
        unsigned nnz = 0;
        for (IndexType j = 0; j < InputsPerChunk; ++j)
        {
            const vec_uint_t inputChunk = inputVector[i * InputsPerChunk + j];
            nnz |= unsigned(vec_nnz(inputChunk)) << (j * InputSimdWidth);
        }
        for (IndexType j = 0; j < OutputsPerChunk; ++j)
        {
            const unsigned lookup = (nnz >> (j * 8)) & 0xFF;
            const vec128_t offsets =
              vec128_load(reinterpret_cast<const vec128_t*>(&Lookup.offset_indices[lookup]));
            vec128_storeu(reinterpret_cast<vec128_t*>(out + count), vec128_add(base, offsets));
            count += popcount(lookup);
            base = vec128_add(base, increment);
        }
    }
    count_out = count;
#endif
}
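
// In effect, every vector path above computes the same index list as this plain
// scalar sketch (illustrative only, not compiled): append the index of each
// nonzero 32-bit input word to `out` and report how many were found.
//
//     IndexType count = 0;
//     for (IndexType i = 0; i < InputDimensions; ++i)
//         if (input[i] != 0)
//             out[count++] = std::uint16_t(i);
//     count_out = count;
//
// Unlike the sketch, the vector paths may store padding entries past `count`;
// callers read only the first `count` entries, and the stores stay within the
// `out` buffer's bounds.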

#endif

// Sparse input implementation
template<IndexType InDims, IndexType OutDims>
class AffineTransformSparseInput {
   public:
    // Input/output type
    using InputType  = std::uint8_t;
    using OutputType = std::int32_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions  = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static_assert(OutputDimensions % 16 == 0,
                  "Only implemented for OutputDimensions divisible by 16.");

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

#if (USE_SSSE3 | (USE_NEON >= 8))
    static constexpr IndexType ChunkSize = 4;
#else
    static constexpr IndexType ChunkSize = 1;
#endif

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
        std::uint32_t hashValue = 0xCC03DAE4u;
        hashValue += OutputDimensions;
        hashValue ^= prevHash >> 1;
        hashValue ^= prevHash << 31;
        return hashValue;
    }

    static constexpr IndexType get_weight_index_scrambled(IndexType i) {
        return (i / ChunkSize) % (PaddedInputDimensions / ChunkSize) * OutputDimensions * ChunkSize
             + i / PaddedInputDimensions * ChunkSize + i % ChunkSize;
    }
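
    // For example, with ChunkSize == 4 and the dense index written as
    // i == out * PaddedInputDimensions + in, the formula above reduces to
    //
    //     scrambled == (in / 4) * OutputDimensions * 4 + out * 4 + in % 4;
    //
    // i.e. weights are grouped by 4-wide input chunk, and within a chunk the four
    // weights of every output row sit next to each other. propagate() relies on
    // this: for a nonzero chunk i it reads one contiguous block starting at
    // &weights[i * OutputDimensions * ChunkSize].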

    static constexpr IndexType get_weight_index(IndexType i) {
#if (USE_SSSE3 | (USE_NEON >= 8))
        return get_weight_index_scrambled(i);
#else
        return i;
#endif
    }

    // Read network parameters
    bool read_parameters(std::istream& stream) {
        read_little_endian<BiasType>(stream, biases, OutputDimensions);
        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

        return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
        write_little_endian<BiasType>(stream, biases, OutputDimensions);

        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

        return !stream.fail();
    }

    // Forward propagation
    void propagate(const InputType* input, OutputType* output) const {

#if (USE_SSSE3 | (USE_NEON >= 8))
    #if defined(USE_AVX512)
        using invec_t  = __m512i;
        using outvec_t = __m512i;
        #define vec_set_32 _mm512_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m512_add_dpbusd_epi32
    #elif defined(USE_AVX2)
        using invec_t  = __m256i;
        using outvec_t = __m256i;
        #define vec_set_32 _mm256_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m256_add_dpbusd_epi32
    #elif defined(USE_SSSE3)
        using invec_t  = __m128i;
        using outvec_t = __m128i;
        #define vec_set_32 _mm_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m128_add_dpbusd_epi32
    #elif defined(USE_NEON_DOTPROD)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::dotprod_m128_add_dpbusd_epi32
    #elif defined(USE_NEON)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::neon_m128_add_dpbusd_epi32
    #endif
        static constexpr IndexType OutputSimdWidth = sizeof(outvec_t) / sizeof(OutputType);

        constexpr IndexType NumChunks =
          ceil_to_multiple<IndexType>(InputDimensions, 8) / ChunkSize;
        constexpr IndexType NumRegs = OutputDimensions / OutputSimdWidth;
        std::uint16_t       nnz[NumChunks];
        IndexType           count;

        const auto input32 = reinterpret_cast<const std::int32_t*>(input);

        // Find indices of nonzero 32-bit blocks
        find_nnz<NumChunks>(input32, nnz, count);

        const outvec_t* biasvec = reinterpret_cast<const outvec_t*>(biases);
        outvec_t        acc[NumRegs];
        for (IndexType k = 0; k < NumRegs; ++k)
            acc[k] = biasvec[k];

        for (IndexType j = 0; j < count; ++j)
        {
            const auto    i  = nnz[j];
            const invec_t in = vec_set_32(input32[i]);
            const auto    col =
              reinterpret_cast<const invec_t*>(&weights[i * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumRegs; ++k)
                vec_add_dpbusd_32(acc[k], in, col[k]);
        }

        outvec_t* outptr = reinterpret_cast<outvec_t*>(output);
        for (IndexType k = 0; k < NumRegs; ++k)
            outptr[k] = acc[k];
    #undef vec_set_32
    #undef vec_add_dpbusd_32
#else
        // Use dense implementation for the other architectures.
        affine_transform_non_ssse3<InputDimensions, PaddedInputDimensions, OutputDimensions>(
          output, weights, biases, input);
#endif
    }

   private:
    using BiasType   = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};

}  // namespace Stockfish::Eval::NNUE::Layers

#endif  // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED