Path: blob/main/contrib/arm-optimized-routines/networking/arm/chksum_simd.c
/*
 * Armv7-A specific checksum implementation using NEON
 *
 * Copyright (c) 2020, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "networking.h"
#include "../chksum_common.h"

#ifndef __ARM_NEON
#pragma GCC target("+simd")
#endif

#include <arm_neon.h>

unsigned short
__chksum_arm_simd(const void *ptr, unsigned int nbytes)
{
    bool swap = (uintptr_t) ptr & 1;
    uint64x1_t vsum = { 0 };

    if (unlikely(nbytes < 40))
    {
        uint64_t sum = slurp_small(ptr, nbytes);
        return fold_and_swap(sum, false);
    }

    /* 8-byte align pointer */
    /* Inline slurp_head-like code since we use NEON here */
    Assert(nbytes >= 8);
    uint32_t off = (uintptr_t) ptr & 7;
    if (likely(off != 0))
    {
        const uint64_t *may_alias ptr64 = align_ptr(ptr, 8);
        uint64x1_t vword64 = vld1_u64(ptr64);
        /* Get rid of bytes 0..off-1 */
        uint64x1_t vmask = vdup_n_u64(ALL_ONES);
        int64x1_t vshiftl = vdup_n_s64(CHAR_BIT * off);
        vmask = vshl_u64(vmask, vshiftl);
        vword64 = vand_u64(vword64, vmask);
        uint32x2_t vtmp = vreinterpret_u32_u64(vword64);
        /* Set accumulator */
        vsum = vpaddl_u32(vtmp);
        /* Update pointer and remaining size */
        ptr = (char *) ptr64 + 8;
        nbytes -= 8 - off;
    }
    Assert(((uintptr_t) ptr & 7) == 0);

    /* Sum groups of 64 bytes */
    uint64x2_t vsum0 = { 0, 0 };
    uint64x2_t vsum1 = { 0, 0 };
    uint64x2_t vsum2 = { 0, 0 };
    uint64x2_t vsum3 = { 0, 0 };
    const uint32_t *may_alias ptr32 = ptr;
    for (uint32_t i = 0; i < nbytes / 64; i++)
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
        uint32x4_t vtmp2 = vld1q_u32(ptr32 + 8);
        uint32x4_t vtmp3 = vld1q_u32(ptr32 + 12);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        vsum1 = vpadalq_u32(vsum1, vtmp1);
        vsum2 = vpadalq_u32(vsum2, vtmp2);
        vsum3 = vpadalq_u32(vsum3, vtmp3);
        ptr32 += 16;
    }
    nbytes %= 64;

    /* Fold vsum1/vsum2/vsum3 into vsum0 */
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum2));
    vsum1 = vpadalq_u32(vsum1, vreinterpretq_u32_u64(vsum3));
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum1));

    /* Add any trailing 16-byte groups */
    while (likely(nbytes >= 16))
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        ptr32 += 4;
        nbytes -= 16;
    }
    Assert(nbytes < 16);

    /* Fold vsum0 into vsum */
    {
        /* 4xu32 (4x32b) -> 2xu64 (2x33b) */
        vsum0 = vpaddlq_u32(vreinterpretq_u32_u64(vsum0));
        /* 4xu32 (2x(1b+32b)) -> 2xu64 (2x(0b+32b)) */
        vsum0 = vpaddlq_u32(vreinterpretq_u32_u64(vsum0));
        /* 4xu32 (4x32b) -> 2xu64 (2x33b) */
        Assert((vgetq_lane_u64(vsum0, 0) >> 32) == 0);
        Assert((vgetq_lane_u64(vsum0, 1) >> 32) == 0);
        uint32x2_t vtmp = vmovn_u64(vsum0);
        /* Add to accumulator */
        vsum = vpadal_u32(vsum, vtmp);
    }

    /* Add any trailing group of 8 bytes */
    if (nbytes & 8)
    {
        uint32x2_t vtmp = vld1_u32(ptr32);
        /* Add to accumulator */
        vsum = vpadal_u32(vsum, vtmp);
        ptr32 += 2;
        nbytes -= 8;
    }
    Assert(nbytes < 8);

    /* Handle any trailing 1..7 bytes */
    if (likely(nbytes != 0))
    {
        Assert(((uintptr_t) ptr32 & 7) == 0);
        Assert(nbytes < 8);
        uint64x1_t vword64 = vld1_u64((const uint64_t *) ptr32);
        /* Get rid of bytes 7..nbytes */
        uint64x1_t vmask = vdup_n_u64(ALL_ONES);
        int64x1_t vshiftr = vdup_n_s64(-CHAR_BIT * (8 - nbytes));
        vmask = vshl_u64(vmask, vshiftr); /* Shift right */
        vword64 = vand_u64(vword64, vmask);
        /* Fold 64-bit sum to 33 bits */
        vword64 = vpaddl_u32(vreinterpret_u32_u64(vword64));
        /* Add to accumulator */
        vsum = vpadal_u32(vsum, vreinterpret_u32_u64(vword64));
    }

    /* Fold 64-bit vsum to 32 bits */
    vsum = vpaddl_u32(vreinterpret_u32_u64(vsum));
    vsum = vpaddl_u32(vreinterpret_u32_u64(vsum));
    Assert(vget_lane_u32(vreinterpret_u32_u64(vsum), 1) == 0);

    /* Fold 32-bit vsum to 16 bits */
    uint32x2_t vsum32 = vreinterpret_u32_u64(vsum);
    vsum32 = vpaddl_u16(vreinterpret_u16_u32(vsum32));
    vsum32 = vpaddl_u16(vreinterpret_u16_u32(vsum32));
    Assert(vget_lane_u16(vreinterpret_u16_u32(vsum32), 1) == 0);
    Assert(vget_lane_u16(vreinterpret_u16_u32(vsum32), 2) == 0);
    Assert(vget_lane_u16(vreinterpret_u16_u32(vsum32), 3) == 0);

    /* Convert to 16-bit scalar */
    uint16_t sum = vget_lane_u16(vreinterpret_u16_u32(vsum32), 0);

    if (unlikely(swap)) /* Odd base pointer is unexpected */
    {
        sum = bswap16(sum);
    }
    return sum;
}
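
A quick way to sanity-check the routine is to compare it against a plain scalar ones'-complement fold (RFC 1071 style). The harness below is only a sketch and is not part of the library: it assumes a little-endian Armv7-A build linked against the arm-optimized-routines networking code so that __chksum_arm_simd is available, and the scalar_chksum helper, buffer contents and sizes are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Declared here for the sketch; normally provided by the library build */
unsigned short __chksum_arm_simd(const void *ptr, unsigned int nbytes);

/* Scalar reference: sum 16-bit words in host order with end-around carry,
   zero-padding an odd trailing byte (assumes little-endian, even-aligned buffer) */
static uint16_t
scalar_chksum(const void *ptr, unsigned int nbytes)
{
    const uint8_t *p = ptr;
    uint64_t sum = 0;
    while (nbytes >= 2)
    {
        uint16_t w;
        memcpy(&w, p, 2);
        sum += w;
        p += 2;
        nbytes -= 2;
    }
    if (nbytes != 0)
        sum += p[0];
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t) sum;
}

int
main(void)
{
    /* 8-byte aligned test buffer with arbitrary contents */
    static uint8_t buf[1500] __attribute__((aligned(8)));
    for (unsigned int i = 0; i < sizeof(buf); i++)
        buf[i] = (uint8_t) (i * 7 + 3);

    /* Exercise a range of sizes covering the small, bulk and tail paths */
    for (unsigned int n = 1; n <= sizeof(buf); n += 53)
    {
        uint16_t simd = __chksum_arm_simd(buf, n);
        uint16_t ref = scalar_chksum(buf, n);
        printf("n=%4u simd=0x%04x scalar=0x%04x %s\n",
               n, simd, ref, simd == ref ? "ok" : "MISMATCH");
    }
    return 0;
}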