Path: blob/main/contrib/arm-optimized-routines/networking/aarch64/chksum_simd.c
/*
 * AArch64-specific checksum implementation using NEON
 *
 * Copyright (c) 2020, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "networking.h"
#include "../chksum_common.h"

#ifndef __ARM_NEON
#pragma GCC target("+simd")
#endif

#include <arm_neon.h>

always_inline
static inline uint64_t
slurp_head64(const void **pptr, uint32_t *nbytes)
{
    Assert(*nbytes >= 8);
    uint64_t sum = 0;
    uint32_t off = (uintptr_t) *pptr % 8;
    if (likely(off != 0))
    {
        /* Get rid of bytes 0..off-1 */
        const unsigned char *ptr64 = align_ptr(*pptr, 8);
        uint64_t mask = ALL_ONES << (CHAR_BIT * off);
        uint64_t val = load64(ptr64) & mask;
        /* Fold 64-bit sum to 33 bits */
        sum = val >> 32;
        sum += (uint32_t) val;
        *pptr = ptr64 + 8;
        *nbytes -= 8 - off;
    }
    return sum;
}

always_inline
static inline uint64_t
slurp_tail64(uint64_t sum, const void *ptr, uint32_t nbytes)
{
    Assert(nbytes < 8);
    if (likely(nbytes != 0))
    {
        /* Get rid of bytes 7..nbytes */
        uint64_t mask = ALL_ONES >> (CHAR_BIT * (8 - nbytes));
        Assert(__builtin_popcountl(mask) / CHAR_BIT == nbytes);
        uint64_t val = load64(ptr) & mask;
        sum += val >> 32;
        sum += (uint32_t) val;
        nbytes = 0;
    }
    Assert(nbytes == 0);
    return sum;
}

unsigned short
__chksum_aarch64_simd(const void *ptr, unsigned int nbytes)
{
    bool swap = (uintptr_t) ptr & 1;
    uint64_t sum;

    if (unlikely(nbytes < 50))
    {
        sum = slurp_small(ptr, nbytes);
        swap = false;
        goto fold;
    }

    /* 8-byte align pointer */
    Assert(nbytes >= 8);
    sum = slurp_head64(&ptr, &nbytes);
    Assert(((uintptr_t) ptr & 7) == 0);

    const uint32_t *may_alias ptr32 = ptr;

    uint64x2_t vsum0 = { 0, 0 };
    uint64x2_t vsum1 = { 0, 0 };
    uint64x2_t vsum2 = { 0, 0 };
    uint64x2_t vsum3 = { 0, 0 };

    /* Sum groups of 64 bytes */
    for (uint32_t i = 0; i < nbytes / 64; i++)
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
        uint32x4_t vtmp2 = vld1q_u32(ptr32 + 8);
        uint32x4_t vtmp3 = vld1q_u32(ptr32 + 12);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        vsum1 = vpadalq_u32(vsum1, vtmp1);
        vsum2 = vpadalq_u32(vsum2, vtmp2);
        vsum3 = vpadalq_u32(vsum3, vtmp3);
        ptr32 += 16;
    }
    nbytes %= 64;

    /* Fold vsum2 and vsum3 into vsum0 and vsum1 */
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum2));
    vsum1 = vpadalq_u32(vsum1, vreinterpretq_u32_u64(vsum3));

    /* Add any trailing group of 32 bytes */
    if (nbytes & 32)
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        vsum1 = vpadalq_u32(vsum1, vtmp1);
        ptr32 += 8;
        nbytes -= 32;
    }
    Assert(nbytes < 32);

    /* Fold vsum1 into vsum0 */
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum1));

    /* Add any trailing group of 16 bytes */
    if (nbytes & 16)
    {
        uint32x4_t vtmp = vld1q_u32(ptr32);
        vsum0 = vpadalq_u32(vsum0, vtmp);
        ptr32 += 4;
        nbytes -= 16;
    }
    Assert(nbytes < 16);

    /* Add any trailing group of 8 bytes */
    if (nbytes & 8)
    {
        uint32x2_t vtmp = vld1_u32(ptr32);
        vsum0 = vaddw_u32(vsum0, vtmp);
        ptr32 += 2;
        nbytes -= 8;
    }
    Assert(nbytes < 8);

    uint64_t val = vaddlvq_u32(vreinterpretq_u32_u64(vsum0));
    sum += val >> 32;
    sum += (uint32_t) val;

    /* Handle any trailing 0..7 bytes */
    sum = slurp_tail64(sum, ptr32, nbytes);

fold:
    return fold_and_swap(sum, swap);
}
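
A minimal cross-check harness for the routine above, not part of the library. It assumes a little-endian AArch64 host and that __chksum_aarch64_simd returns the folded 16-bit ones'-complement sum (without the final inversion) as seen from the caller's pointer; the scalar_chksum reference and the main loop are hypothetical additions for illustration, and the prototype would normally come from networking.h. If the library's odd-address swap convention differs from this guess, the odd-offset comparisons would need adjusting.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Prototype as defined above; normally provided by networking.h. */
unsigned short __chksum_aarch64_simd(const void *ptr, unsigned int nbytes);

/* Naive RFC 1071-style reference: sum 16-bit words in host (little-endian)
   byte order, treat a trailing odd byte as a word with a zero high byte,
   then fold the carries back in (end-around carry). */
static uint16_t
scalar_chksum(const void *ptr, unsigned int nbytes)
{
    const unsigned char *p = ptr;
    uint64_t sum = 0;
    while (nbytes >= 2)
    {
        uint16_t word;
        memcpy(&word, p, sizeof word);  /* unaligned-safe host-order load */
        sum += word;
        p += 2;
        nbytes -= 2;
    }
    if (nbytes != 0)
        sum += *p;                      /* trailing odd byte */
    while (sum >> 16)                   /* fold to 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t) sum;
}

int
main(void)
{
    unsigned char buf[256];
    for (unsigned int i = 0; i < sizeof buf; i++)
        buf[i] = (unsigned char) (i * 7 + 3);

    /* Exercise a range of lengths and pointer alignments, covering the
       small-buffer path (< 50 bytes) as well as the NEON path. */
    for (unsigned int len = 1; len <= 200; len++)
        for (unsigned int off = 0; off < 4; off++)
            if (__chksum_aarch64_simd(buf + off, len) != scalar_chksum(buf + off, len))
                printf("mismatch: off=%u len=%u\n", off, len);
    return 0;
}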