Path: blob/master/src/nnue/layers/affine_transform_sparse_input.h
/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2025 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransformSparseInput of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>

#include "../../bitboard.h"
#include "../simd.h"
#include "../nnue_common.h"

/*
   This file contains the definition for a fully connected layer (aka affine transform) with block sparse input.
*/

namespace Stockfish::Eval::NNUE::Layers {

#if (USE_SSSE3 | (USE_NEON >= 8))
static constexpr int lsb_index64[64] = {
  0,  47, 1,  56, 48, 27, 2,  60, 57, 49, 41, 37, 28, 16, 3,  61, 54, 58, 35, 52, 50, 42,
  21, 44, 38, 32, 29, 23, 17, 11, 4,  62, 46, 55, 26, 59, 40, 36, 15, 53, 34, 51, 20, 43,
  31, 22, 10, 45, 25, 39, 14, 33, 19, 30, 9,  24, 13, 18, 8,  12, 7,  6,  5,  63};

constexpr int constexpr_lsb(uint64_t bb) {
    assert(bb != 0);
    constexpr uint64_t debruijn64 = 0x03F79D71B4CB0A89ULL;
    return lsb_index64[((bb ^ (bb - 1)) * debruijn64) >> 58];
}

alignas(CacheLineSize) static constexpr struct OffsetIndices {

    std::uint16_t offset_indices[256][8];

    constexpr OffsetIndices() :
        offset_indices() {
        for (int i = 0; i < 256; ++i)
        {
            std::uint64_t j = i, k = 0;
            while (j)
            {
                offset_indices[i][k++] = constexpr_lsb(j);
                j &= j - 1;
            }
            while (k < 8)
                offset_indices[i][k++] = 0;
        }
    }

} Lookup;
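
// Example (illustrative): each entry of Lookup lists, in ascending order, the
// positions of the set bits of its 8-bit index, zero-padded to 8 slots, e.g.
//
//     Lookup.offset_indices[0b00010010] == {1, 4, 0, 0, 0, 0, 0, 0}
//
// find_nnz() below adds a running base offset to these per-byte bit positions
// to turn them into absolute indices of nonzero input chunks.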

#if defined(__GNUC__) || defined(__clang__)
    #define RESTRICT __restrict__
#elif defined(_MSC_VER)
    #define RESTRICT __restrict
#else
    #define RESTRICT
#endif

// Find indices of nonzero numbers in an int32_t array
template<const IndexType InputDimensions>
void find_nnz(const std::int32_t* RESTRICT input,
              std::uint16_t* RESTRICT      out,
              IndexType&                   count_out) {

#if defined(USE_AVX512ICL)

    constexpr IndexType SimdWidthIn  = 16;  // 512 bits / 32 bits
    constexpr IndexType SimdWidthOut = 32;  // 512 bits / 16 bits
    constexpr IndexType NumChunks    = InputDimensions / SimdWidthOut;
    const __m512i       increment    = _mm512_set1_epi16(SimdWidthOut);
    __m512i base = _mm512_set_epi16(  // Same permute order as _mm512_packus_epi32()
      31, 30, 29, 28, 15, 14, 13, 12, 27, 26, 25, 24, 11, 10, 9, 8, 23, 22, 21, 20, 7, 6, 5, 4, 19,
      18, 17, 16, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV0 = _mm512_load_si512(input + i * 2 * SimdWidthIn);
        const __m512i inputV1 = _mm512_load_si512(input + i * 2 * SimdWidthIn + SimdWidthIn);

        // Get a bitmask and gather non zero indices
        const __m512i   inputV01 = _mm512_packus_epi32(inputV0, inputV1);
        const __mmask32 nnzMask  = _mm512_test_epi16_mask(inputV01, inputV01);

        // Avoid _mm512_mask_compressstoreu_epi16() as it's 256 uOps on Zen4
        __m512i nnz = _mm512_maskz_compress_epi16(nnzMask, base);
        _mm512_storeu_si512(out + count, nnz);

        count += popcount(nnzMask);
        base = _mm512_add_epi16(base, increment);
    }
    count_out = count;

#elif defined(USE_AVX512)

    constexpr IndexType SimdWidth = 16;  // 512 bits / 32 bits
    constexpr IndexType NumChunks = InputDimensions / SimdWidth;
    const __m512i       increment = _mm512_set1_epi32(SimdWidth);
    __m512i base = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);

    IndexType count = 0;
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        const __m512i inputV = _mm512_load_si512(input + i * SimdWidth);

        // Get a bitmask and gather non zero indices
        const __mmask16 nnzMask = _mm512_test_epi32_mask(inputV, inputV);
        const __m512i   nnzV    = _mm512_maskz_compress_epi32(nnzMask, base);
        _mm512_mask_cvtepi32_storeu_epi16(out + count, 0xFFFF, nnzV);
        count += popcount(nnzMask);
        base = _mm512_add_epi32(base, increment);
    }
    count_out = count;

#else

    using namespace SIMD;

    constexpr IndexType InputSimdWidth = sizeof(vec_uint_t) / sizeof(std::int32_t);
    // Inputs are processed InputSimdWidth at a time and outputs are processed 8 at a time
    // so we process in chunks of max(InputSimdWidth, 8)
    constexpr IndexType ChunkSize       = std::max<IndexType>(InputSimdWidth, 8);
    constexpr IndexType NumChunks       = InputDimensions / ChunkSize;
    constexpr IndexType InputsPerChunk  = ChunkSize / InputSimdWidth;
    constexpr IndexType OutputsPerChunk = ChunkSize / 8;

    const auto     inputVector = reinterpret_cast<const vec_uint_t*>(input);
    IndexType      count       = 0;
    vec128_t       base        = vec128_zero;
    const vec128_t increment   = vec128_set_16(8);
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        // bitmask of nonzero values in this chunk
        unsigned nnz = 0;
        for (IndexType j = 0; j < InputsPerChunk; ++j)
        {
            const vec_uint_t inputChunk = inputVector[i * InputsPerChunk + j];
            nnz |= unsigned(vec_nnz(inputChunk)) << (j * InputSimdWidth);
        }
        for (IndexType j = 0; j < OutputsPerChunk; ++j)
        {
            const unsigned lookup = (nnz >> (j * 8)) & 0xFF;
            const vec128_t offsets =
              vec128_load(reinterpret_cast<const vec128_t*>(&Lookup.offset_indices[lookup]));
            vec128_storeu(reinterpret_cast<vec128_t*>(out + count), vec128_add(base, offsets));
            count += popcount(lookup);
            base = vec128_add(base, increment);
        }
    }
    count_out = count;
#endif
}
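
// Example (illustrative): on every code path above, find_nnz() writes the
// indices of the nonzero 32-bit entries in ascending order. For an input
// whose only nonzero values sit at indices 1, 4 and 6, it yields
// out = {1, 4, 6} and count_out = 3. The stores write full SIMD vectors, so
// entries of out past count_out may hold garbage; only the first count_out
// values are meaningful.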

#endif

// Sparse input implementation
template<IndexType InDims, IndexType OutDims>
class AffineTransformSparseInput {
   public:
    // Input/output type
    using InputType  = std::uint8_t;
    using OutputType = std::int32_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions  = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static_assert(OutputDimensions % 16 == 0,
                  "Only implemented for OutputDimensions divisible by 16.");

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

#if (USE_SSSE3 | (USE_NEON >= 8))
    static constexpr IndexType ChunkSize = 4;
#else
    static constexpr IndexType ChunkSize = 1;
#endif

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
        std::uint32_t hashValue = 0xCC03DAE4u;
        hashValue += OutputDimensions;
        hashValue ^= prevHash >> 1;
        hashValue ^= prevHash << 31;
        return hashValue;
    }

    static constexpr IndexType get_weight_index_scrambled(IndexType i) {
        return (i / ChunkSize) % (PaddedInputDimensions / ChunkSize) * OutputDimensions * ChunkSize
             + i / PaddedInputDimensions * ChunkSize + i % ChunkSize;
    }
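
    // Example (illustrative): with ChunkSize == 4, the weight at row-major
    // index i == o * PaddedInputDimensions + j (output o, input j) is stored at
    //
    //     (j / 4) * OutputDimensions * 4 + o * 4 + (j % 4)
    //
    // so the four int8 weights multiplying one nonzero 32-bit input block are
    // contiguous for every output, matching the column layout that propagate()
    // reads via vec_add_dpbusd_32.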

    static constexpr IndexType get_weight_index(IndexType i) {
#if (USE_SSSE3 | (USE_NEON >= 8))
        return get_weight_index_scrambled(i);
#else
        return i;
#endif
    }

    // Read network parameters
    bool read_parameters(std::istream& stream) {
        read_little_endian<BiasType>(stream, biases, OutputDimensions);
        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

        return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
        write_little_endian<BiasType>(stream, biases, OutputDimensions);

        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

        return !stream.fail();
    }
    // Forward propagation
    void propagate(const InputType* input, OutputType* output) const {

#if (USE_SSSE3 | (USE_NEON >= 8))
    #if defined(USE_AVX512)
        using invec_t  = __m512i;
        using outvec_t = __m512i;
        #define vec_add_32 _mm512_add_epi32
        #define vec_set_32 _mm512_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m512_add_dpbusd_epi32
    #elif defined(USE_AVX2)
        using invec_t  = __m256i;
        using outvec_t = __m256i;
        #define vec_add_32 _mm256_add_epi32
        #define vec_set_32 _mm256_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m256_add_dpbusd_epi32
    #elif defined(USE_SSSE3)
        using invec_t  = __m128i;
        using outvec_t = __m128i;
        #define vec_set_32 _mm_set1_epi32
        #define vec_add_dpbusd_32 SIMD::m128_add_dpbusd_epi32
    #elif defined(USE_NEON_DOTPROD)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::dotprod_m128_add_dpbusd_epi32
    #elif defined(USE_NEON)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 SIMD::neon_m128_add_dpbusd_epi32
    #endif
        constexpr IndexType OutputSimdWidth = sizeof(outvec_t) / sizeof(OutputType);
        constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / ChunkSize;
        constexpr IndexType NumAccums = OutputDimensions / OutputSimdWidth;
        // If we're using high-latency dot product instructions, split the accumulators
        // to create 3 separate dependency chains and merge at the end
        constexpr IndexType NumRegs =
    #if defined(USE_VNNI)
          3 * NumAccums;
    #else
          NumAccums;
    #endif
        std::uint16_t nnz[NumChunks];
        IndexType     count;

        const auto input32 = reinterpret_cast<const std::int32_t*>(input);

        // Find indices of nonzero 32-bit blocks
        find_nnz<NumChunks>(input32, nnz, count);

        const outvec_t* biasvec = reinterpret_cast<const outvec_t*>(biases);
        outvec_t        acc[NumRegs];
        for (IndexType k = 0; k < NumAccums; ++k)
            acc[k] = biasvec[k];

        const auto* start = nnz;
        const auto* end   = nnz + count;

        // convince GCC to not do weird pointer arithmetic in the following loop
        const std::int8_t* weights_cp = weights;
    #if defined(USE_VNNI)
        for (IndexType k = NumAccums; k < NumRegs; ++k)
            acc[k] = vec_zero();

        while (start < end - 2)
        {
            const std::ptrdiff_t i0  = *start++;
            const std::ptrdiff_t i1  = *start++;
            const std::ptrdiff_t i2  = *start++;
            const invec_t        in0 = vec_set_32(input32[i0]);
            const invec_t        in1 = vec_set_32(input32[i1]);
            const invec_t        in2 = vec_set_32(input32[i2]);
            const auto           col0 =
              reinterpret_cast<const invec_t*>(&weights_cp[i0 * OutputDimensions * ChunkSize]);
            const auto col1 =
              reinterpret_cast<const invec_t*>(&weights_cp[i1 * OutputDimensions * ChunkSize]);
            const auto col2 =
              reinterpret_cast<const invec_t*>(&weights_cp[i2 * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumAccums; ++k)
            {
                vec_add_dpbusd_32(acc[k], in0, col0[k]);
                vec_add_dpbusd_32(acc[k + NumAccums], in1, col1[k]);
                vec_add_dpbusd_32(acc[k + 2 * NumAccums], in2, col2[k]);
            }
        }
        for (IndexType k = 0; k < NumAccums; ++k)
            acc[k] = vec_add_32(vec_add_32(acc[k], acc[k + NumAccums]), acc[k + 2 * NumAccums]);
    #endif
        while (start < end)
        {
            const std::ptrdiff_t i  = *start++;
            const invec_t        in = vec_set_32(input32[i]);
            const auto           col =
              reinterpret_cast<const invec_t*>(&weights_cp[i * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumAccums; ++k)
                vec_add_dpbusd_32(acc[k], in, col[k]);
        }

        outvec_t* outptr = reinterpret_cast<outvec_t*>(output);
        for (IndexType k = 0; k < NumAccums; ++k)
            outptr[k] = acc[k];

    #undef vec_set_32
    #undef vec_add_dpbusd_32
    #ifdef vec_add_32
        #undef vec_add_32
    #endif
#else
        // Use dense implementation for the other architectures.
        affine_transform_non_ssse3<InputDimensions, PaddedInputDimensions, OutputDimensions>(
          output, weights, biases, input);
#endif
    }

   private:
    using BiasType   = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};

}  // namespace Stockfish::Eval::NNUE::Layers

#endif  // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
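
// Usage sketch (illustrative only; the dimensions and the stream are
// hypothetical, not taken from any particular network architecture):
//
//     using Layer = Stockfish::Eval::NNUE::Layers::AffineTransformSparseInput<1024, 16>;
//
//     Layer layer;
//     layer.read_parameters(stream);  // stream: a std::istream over the network file section
//     alignas(64) Layer::InputType    in[Layer::PaddedInputDimensions] = {};  // mostly-zero uint8_t
//     alignas(64) Layer::OutputBuffer out;
//     layer.propagate(in, out);  // out[o] = biases[o] + sum_j weights[o][j] * in[j]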