Path: src/nnue/layers/affine_transform_sparse_input.h
/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2026 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransformSparseInput of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>

#include "../../bitboard.h"
#include "../../memory.h"
#include "../simd.h"
#include "../nnue_common.h"

/*
   This file contains the definition for a fully connected layer (aka affine transform) with
   block sparse input.
*/

namespace Stockfish::Eval::NNUE::Layers {

#if (USE_SSSE3 | (USE_NEON >= 8))
static constexpr int lsb_index64[64] = {
  0,  47, 1,  56, 48, 27, 2,  60, 57, 49, 41, 37, 28, 16, 3,  61, 54, 58, 35, 52, 50, 42,
  21, 44, 38, 32, 29, 23, 17, 11, 4,  62, 46, 55, 26, 59, 40, 36, 15, 53, 34, 51, 20, 43,
  31, 22, 10, 45, 25, 39, 14, 33, 19, 30, 9,  24, 13, 18, 8,  12, 7,  6,  5,  63};

constexpr int constexpr_lsb(uint64_t bb) {
    assert(bb != 0);
    constexpr uint64_t debruijn64 = 0x03F79D71B4CB0A89ULL;
    return lsb_index64[((bb ^ (bb - 1)) * debruijn64) >> 58];
}
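
// Illustrative walk-through of the De Bruijn trick above: for bb = 0b01001000,
// bb - 1 = 0b01000111, so bb ^ (bb - 1) = 0b00001111 keeps the lowest set bit
// and every bit below it. Multiplying by debruijn64 then leaves a unique 6-bit
// key in the top bits (14 for this pattern after >> 58), and lsb_index64[14]
// == 3 is exactly the position of the lowest set bit. The point of the table
// is to stay free of intrinsics, so the function remains usable in constant
// expressions, e.g. when building the Lookup table below.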

alignas(CacheLineSize) static constexpr struct OffsetIndices {

    std::uint16_t offset_indices[256][8];

    constexpr OffsetIndices() :
        offset_indices() {
        for (int i = 0; i < 256; ++i)
        {
            std::uint64_t j = i, k = 0;
            while (j)
            {
                offset_indices[i][k++] = constexpr_lsb(j);
                j &= j - 1;
            }
            while (k < 8)
                offset_indices[i][k++] = 0;
        }
    }

} Lookup;
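
// Sample entries (illustrative): each row of Lookup lists, in ascending order,
// the positions of the set bits of its 8-bit index, padded with zeros:
//
//   Lookup.offset_indices[0b00000000] == {0, 0, 0, 0, 0, 0, 0, 0}
//   Lookup.offset_indices[0b00001010] == {1, 3, 0, 0, 0, 0, 0, 0}
//   Lookup.offset_indices[0b10000001] == {0, 7, 0, 0, 0, 0, 0, 0}
//
// find_nnz() below adds the running chunk base to such a row to turn a chunk's
// nonzero bitmask into absolute block indices. Only the first popcount(mask)
// entries of each 8-element store are meaningful; the next iteration overwrites
// the padding because the output cursor advances by popcount(mask).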

    #if defined(__GNUC__) || defined(__clang__)
        #define RESTRICT __restrict__
    #elif defined(_MSC_VER)
        #define RESTRICT __restrict
    #else
        #define RESTRICT
    #endif

// Find indices of nonzero 32-bit values in a packed byte buffer.
// The input pointer addresses a sequence of 32-bit blocks stored in a
// std::uint8_t array.
template<const IndexType InputDimensions>
void find_nnz(const std::uint8_t* RESTRICT input,
              std::uint16_t* RESTRICT      out,
              IndexType&                   count_out) {

    #if defined(USE_AVX512ICL)

    constexpr IndexType SimdWidthIn  = 64;  // 512 bits
    constexpr IndexType SimdWidthOut = 32;  // 512 bits / 16 bits
    constexpr IndexType NumChunks    = InputDimensions / SimdWidthOut;
    const __m512i       increment    = _mm512_set1_epi16(SimdWidthOut);
    __m512i base = _mm512_set_epi16(  // Same permute order as _mm512_packus_epi32()
      31, 30, 29, 28, 15, 14, 13, 12, 27, 26, 25, 24, 11, 10, 9, 8, 23, 22, 21, 20, 7, 6, 5, 4,
      19, 18, 17, 16, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV0 = _mm512_load_si512(input + i * 2 * SimdWidthIn);
        const __m512i inputV1 = _mm512_load_si512(input + i * 2 * SimdWidthIn + SimdWidthIn);

        // Get a bitmask and gather non zero indices
        const __m512i   inputV01 = _mm512_packus_epi32(inputV0, inputV1);
        const __mmask32 nnzMask  = _mm512_test_epi16_mask(inputV01, inputV01);

        // Avoid _mm512_mask_compressstoreu_epi16() as it's 256 uOps on Zen4
        __m512i nnz = _mm512_maskz_compress_epi16(nnzMask, base);
        _mm512_storeu_si512(out + count, nnz);

        count += popcount(nnzMask);
        base = _mm512_add_epi16(base, increment);
    }
    count_out = count;

    #elif defined(USE_AVX512)

    constexpr IndexType SimdWidth = 16;  // 512 bits / 32 bits
    constexpr IndexType NumChunks = InputDimensions / SimdWidth;
    const __m512i       increment = _mm512_set1_epi32(SimdWidth);
    __m512i base = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV = _mm512_load_si512(input + i * SimdWidth * sizeof(std::uint32_t));

        // Get a bitmask and gather non zero indices
        const __mmask16 nnzMask = _mm512_test_epi32_mask(inputV, inputV);
        const __m512i   nnzV    = _mm512_maskz_compress_epi32(nnzMask, base);
        _mm512_mask_cvtepi32_storeu_epi16(out + count, 0xFFFF, nnzV);
        count += popcount(nnzMask);
        base = _mm512_add_epi32(base, increment);
    }
    count_out = count;

    #else

    using namespace SIMD;

    constexpr IndexType InputSimdWidth = sizeof(vec_uint_t) / sizeof(std::int32_t);
    // Outputs are processed 8 elements at a time, even if the SIMD width is narrower
    constexpr IndexType ChunkSize      = 8;
    constexpr IndexType NumChunks      = InputDimensions / ChunkSize;
    constexpr IndexType InputsPerChunk = ChunkSize / InputSimdWidth;

    static_assert(InputsPerChunk > 0, "SIMD width too wide");

    const auto     inputVector = reinterpret_cast<const vec_uint_t*>(input);
    IndexType      count       = 0;
    vec128_t       base        = vec128_zero;
    const vec128_t increment   = vec128_set_16(8);
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        // bitmask of nonzero values in this chunk
        unsigned nnz = 0;
        for (IndexType j = 0; j < InputsPerChunk; ++j)
        {
            const vec_uint_t inputChunk = inputVector[i * InputsPerChunk + j];
            nnz |= unsigned(vec_nnz(inputChunk)) << (j * InputSimdWidth);
        }
        const vec128_t offsets =
          vec128_load(reinterpret_cast<const vec128_t*>(&Lookup.offset_indices[nnz]));
        vec128_storeu(reinterpret_cast<vec128_t*>(out + count), vec128_add(base, offsets));
        count += popcount(nnz);
        base = vec128_add(base, increment);
    }
    count_out = count;
    #endif
}

#endif

// Sparse input implementation
template<IndexType InDims, IndexType OutDims>
class AffineTransformSparseInput {
   public:
    // Input/output type
    using InputType  = std::uint8_t;
    using OutputType = std::int32_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions  = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static_assert(OutputDimensions % 16 == 0,
                  "Only implemented for OutputDimensions divisible by 16.");

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

#if (USE_SSSE3 | (USE_NEON >= 8))
    static constexpr IndexType ChunkSize = 4;
#else
    static constexpr IndexType ChunkSize = 1;
#endif

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
        std::uint32_t hashValue = 0xCC03DAE4u;
        hashValue += OutputDimensions;
        hashValue ^= prevHash >> 1;
        hashValue ^= prevHash << 31;
        return hashValue;
    }

    static constexpr IndexType get_weight_index_scrambled(IndexType i) {
        return (i / ChunkSize) % (PaddedInputDimensions / ChunkSize) * OutputDimensions * ChunkSize
             + i / PaddedInputDimensions * ChunkSize + i % ChunkSize;
    }
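
    // Layout produced by the scrambling above (illustrative): for stream index
    // i == o * PaddedInputDimensions + j, i.e. output o and input j, the weight
    // lands in block j / ChunkSize, at offset o * ChunkSize + j % ChunkSize
    // inside the block. The OutputDimensions * ChunkSize weights touched by one
    // nonzero 4-byte input chunk are therefore contiguous, which is what lets
    // propagate() read them as whole SIMD registers. For hypothetical values
    // PaddedInputDimensions == 32 and OutputDimensions == 16, the weight
    // (o == 2, j == 13) has i == 77 and
    // get_weight_index_scrambled(77) == 3 * 64 + 2 * 4 + 1 == 201.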

    static constexpr IndexType get_weight_index(IndexType i) {
#if (USE_SSSE3 | (USE_NEON >= 8))
        return get_weight_index_scrambled(i);
#else
        return i;
#endif
    }

    // Read network parameters
    bool read_parameters(std::istream& stream) {
        read_little_endian<BiasType>(stream, biases, OutputDimensions);
        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

        return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
        write_little_endian<BiasType>(stream, biases, OutputDimensions);

        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

        return !stream.fail();
    }

    std::size_t get_content_hash() const {
        std::size_t h = 0;
        hash_combine(h, get_raw_data_hash(biases));
        hash_combine(h, get_raw_data_hash(weights));
        hash_combine(h, get_hash_value(0));
        return h;
    }

    // Forward propagation
    void propagate(const InputType* input, OutputType* output) const {

#if (USE_SSSE3 | (USE_NEON >= 8))
    #if defined(USE_AVX512)
        using invec_t  = __m512i;
        using outvec_t = __m512i;
        #define vec_add_32 _mm512_add_epi32
        #define vec_set_32 _mm512_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m512_add_dpbusd_epi32
    #elif defined(USE_AVX2)
        using invec_t  = __m256i;
        using outvec_t = __m256i;
        #define vec_add_32 _mm256_add_epi32
        #define vec_set_32 _mm256_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m256_add_dpbusd_epi32
    #elif defined(USE_SSSE3)
        using invec_t  = __m128i;
        using outvec_t = __m128i;
        #define vec_set_32 _mm_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m128_add_dpbusd_epi32
    #elif defined(USE_NEON_DOTPROD)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::dotprod_m128_add_dpbusd_epi32
    #elif defined(USE_NEON)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::neon_m128_add_dpbusd_epi32
    #endif
        constexpr IndexType OutputSimdWidth = sizeof(outvec_t) / sizeof(OutputType);
        constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / ChunkSize;
        constexpr IndexType NumAccums = OutputDimensions / OutputSimdWidth;
        // If we're using high-latency dot product instructions, split the accumulators
        // to create 3 separate dependency chains and merge at the end
        constexpr IndexType NumRegs =
    #if defined(USE_VNNI)
          3 * NumAccums;
    #else
          NumAccums;
    #endif
        std::uint16_t nnz[NumChunks];
        IndexType     count;

        // Find indices of nonzero 32-bit blocks
        find_nnz<NumChunks>(input, nnz, count);

        const outvec_t* biasvec = reinterpret_cast<const outvec_t*>(biases);
        outvec_t        acc[NumRegs];
        for (IndexType k = 0; k < NumAccums; ++k)
            acc[k] = biasvec[k];

        const auto* start = nnz;
        const auto* end   = nnz + count;

        // convince GCC to not do weird pointer arithmetic in the following loop
        const std::int8_t* weights_cp = weights;
    #if defined(USE_VNNI)
        for (IndexType k = NumAccums; k < NumRegs; ++k)
            acc[k] = vec_zero();

        while (start < end - 2)
        {
            const std::ptrdiff_t i0 = *start++;
            const std::ptrdiff_t i1 = *start++;
            const std::ptrdiff_t i2 = *start++;
            const invec_t        in0 =
              vec_set_32(load_as<std::int32_t>(input + i0 * sizeof(std::int32_t)));
            const invec_t in1 =
              vec_set_32(load_as<std::int32_t>(input + i1 * sizeof(std::int32_t)));
            const invec_t in2 =
              vec_set_32(load_as<std::int32_t>(input + i2 * sizeof(std::int32_t)));
            const auto col0 =
              reinterpret_cast<const invec_t*>(&weights_cp[i0 * OutputDimensions * ChunkSize]);
            const auto col1 =
              reinterpret_cast<const invec_t*>(&weights_cp[i1 * OutputDimensions * ChunkSize]);
            const auto col2 =
              reinterpret_cast<const invec_t*>(&weights_cp[i2 * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumAccums; ++k)
            {
                vec_add_dpbusd_32(acc[k], in0, col0[k]);
                vec_add_dpbusd_32(acc[k + NumAccums], in1, col1[k]);
                vec_add_dpbusd_32(acc[k + 2 * NumAccums], in2, col2[k]);
            }
        }
        for (IndexType k = 0; k < NumAccums; ++k)
            acc[k] = vec_add_32(vec_add_32(acc[k], acc[k + NumAccums]), acc[k + 2 * NumAccums]);
    #endif
        while (start < end)
        {
            const std::ptrdiff_t i = *start++;
            const invec_t in = vec_set_32(load_as<std::int32_t>(input + i * sizeof(std::int32_t)));
            const auto    col =
              reinterpret_cast<const invec_t*>(&weights_cp[i * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumAccums; ++k)
                vec_add_dpbusd_32(acc[k], in, col[k]);
        }

        outvec_t* outptr = reinterpret_cast<outvec_t*>(output);
        for (IndexType k = 0; k < NumAccums; ++k)
            outptr[k] = acc[k];

    #undef vec_set_32
    #undef vec_add_dpbusd_32
    #ifdef vec_add_32
        #undef vec_add_32
    #endif
#else
        // Use dense implementation for the other architectures.
        affine_transform_non_ssse3<InputDimensions, PaddedInputDimensions, OutputDimensions>(
          output, weights, biases, input);
#endif
    }

   private:
    using BiasType   = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};

}  // namespace Stockfish::Eval::NNUE::Layers

#endif  // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
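
// Usage sketch (illustrative; the dimensions and names below are hypothetical,
// not the actual Stockfish network architecture):
//
//   using Layer = Stockfish::Eval::NNUE::Layers::AffineTransformSparseInput<2048, 32>;
//
//   Layer layer;  // parameters filled in via layer.read_parameters(stream)
//   alignas(Stockfish::CacheLineSize) std::uint8_t input[Layer::PaddedInputDimensions];
//   alignas(Stockfish::CacheLineSize) Layer::OutputBuffer output;
//   layer.propagate(input, output);  // skips all work for all-zero 4-byte chunks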