GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/arm-optimized-routines/networking/aarch64/chksum_simd.c
/*
 * AArch64-specific checksum implementation using NEON
 *
 * Copyright (c) 2020, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "networking.h"
#include "../chksum_common.h"

#ifndef __ARM_NEON
#pragma GCC target("+simd")
#endif

#include <arm_neon.h>

always_inline
static inline uint64_t
slurp_head64(const void **pptr, uint32_t *nbytes)
{
    Assert(*nbytes >= 8);
    uint64_t sum = 0;
    uint32_t off = (uintptr_t) *pptr % 8;
    if (likely(off != 0))
    {
        /* Get rid of bytes 0..off-1 */
        const unsigned char *ptr64 = align_ptr(*pptr, 8);
        uint64_t mask = ALL_ONES << (CHAR_BIT * off);
        uint64_t val = load64(ptr64) & mask;
        /* Fold 64-bit sum to 33 bits */
        sum = val >> 32;
        sum += (uint32_t) val;
        *pptr = ptr64 + 8;
        *nbytes -= 8 - off;
    }
    return sum;
}
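
/*
 * Folding a 64-bit value to "33 bits", as above, relies on the fact
 * that the sum of the two 32-bit halves is at most
 * 0xffffffff + 0xffffffff = 0x1fffffffe, which fits in 33 bits.  A
 * uint64_t accumulator can therefore absorb on the order of 2^31 such
 * partial sums before any carry can be lost.
 */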

always_inline
static inline uint64_t
slurp_tail64(uint64_t sum, const void *ptr, uint32_t nbytes)
{
    Assert(nbytes < 8);
    if (likely(nbytes != 0))
    {
        /* Get rid of bytes 7..nbytes */
        uint64_t mask = ALL_ONES >> (CHAR_BIT * (8 - nbytes));
        Assert(__builtin_popcountl(mask) / CHAR_BIT == nbytes);
        uint64_t val = load64(ptr) & mask;
        sum += val >> 32;
        sum += (uint32_t) val;
        nbytes = 0;
    }
    Assert(nbytes == 0);
    return sum;
}
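
/*
 * Concrete example of the tail mask: for nbytes == 3,
 * mask == ALL_ONES >> 40 == 0x0000000000ffffff, which on a
 * little-endian load keeps exactly the three lowest-addressed bytes
 * and clears the rest.
 */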

unsigned short
__chksum_aarch64_simd(const void *ptr, unsigned int nbytes)
{
    bool swap = (uintptr_t) ptr & 1;
    uint64_t sum;
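
    /*
     * Per RFC 1071, the ones'-complement sum of 16-bit words computed
     * from an odd starting address equals the byte-swapped correct
     * sum, so an odd source pointer is handled by swapping the folded
     * result at the end instead of realigning every load.
     */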

    if (unlikely(nbytes < 50))
    {
        sum = slurp_small(ptr, nbytes);
        swap = false;
        goto fold;
    }
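    /* The short-buffer path above yields a sum that needs no final
     * byte swap, which is why swap is cleared before the jump. */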

    /* 8-byte align pointer */
    Assert(nbytes >= 8);
    sum = slurp_head64(&ptr, &nbytes);
    Assert(((uintptr_t) ptr & 7) == 0);

    const uint32_t *may_alias ptr32 = ptr;
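    /* may_alias (defined in chksum_common.h) relaxes strict-aliasing
     * rules so the buffer may legally be read through uint32_t loads. */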

    uint64x2_t vsum0 = { 0, 0 };
    uint64x2_t vsum1 = { 0, 0 };
    uint64x2_t vsum2 = { 0, 0 };
    uint64x2_t vsum3 = { 0, 0 };

    /* Sum groups of 64 bytes */
    for (uint32_t i = 0; i < nbytes / 64; i++)
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
        uint32x4_t vtmp2 = vld1q_u32(ptr32 + 8);
        uint32x4_t vtmp3 = vld1q_u32(ptr32 + 12);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        vsum1 = vpadalq_u32(vsum1, vtmp1);
        vsum2 = vpadalq_u32(vsum2, vtmp2);
        vsum3 = vpadalq_u32(vsum3, vtmp3);
        ptr32 += 16;
    }
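    /*
     * vpadalq_u32 widens each adjacent pair of u32 lanes to u64 and
     * accumulates, so carries out of 32 bits are never lost.  Four
     * independent accumulators let consecutive loads and pairwise adds
     * overlap in the pipeline instead of serializing on one register.
     */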
    nbytes %= 64;
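    /* nbytes is now 0..63: bits 5, 4 and 3 of nbytes select the
     * remaining 32-, 16- and 8-byte chunks peeled off below. */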

    /* Fold vsum2 and vsum3 into vsum0 and vsum1 */
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum2));
    vsum1 = vpadalq_u32(vsum1, vreinterpretq_u32_u64(vsum3));
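    /* Reinterpreting a u64 lane as two u32 lanes and pairwise-adding
     * folds each 64-bit partial sum to at most 33 bits while merging
     * the extra accumulators, all in a single vpadalq_u32. */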

    /* Add any trailing group of 32 bytes */
    if (nbytes & 32)
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        vsum1 = vpadalq_u32(vsum1, vtmp1);
        ptr32 += 8;
        nbytes -= 32;
    }
    Assert(nbytes < 32);

    /* Fold vsum1 into vsum0 */
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum1));

    /* Add any trailing group of 16 bytes */
    if (nbytes & 16)
    {
        uint32x4_t vtmp = vld1q_u32(ptr32);
        vsum0 = vpadalq_u32(vsum0, vtmp);
        ptr32 += 4;
        nbytes -= 16;
    }
    Assert(nbytes < 16);

    /* Add any trailing group of 8 bytes */
    if (nbytes & 8)
    {
        uint32x2_t vtmp = vld1_u32(ptr32);
        vsum0 = vaddw_u32(vsum0, vtmp);
        ptr32 += 2;
        nbytes -= 8;
    }
    Assert(nbytes < 8);

    uint64_t val = vaddlvq_u32(vreinterpretq_u32_u64(vsum0));
    sum += val >> 32;
    sum += (uint32_t) val;
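    /* vaddlvq_u32 adds all four u32 lanes into one u64 scalar, which
     * reduces both 64-bit accumulator lanes in a single instruction;
     * the scalar fold above then compresses the result back to at
     * most 33 bits before the tail bytes are added. */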

    /* Handle any trailing 0..7 bytes */
    sum = slurp_tail64(sum, ptr32, nbytes);

fold:
    return fold_and_swap(sum, swap);
}
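
/*
 * Illustrative usage sketch, not part of the original file: computing
 * an IPv4 header checksum with this routine.  It assumes the __chksum
 * interface in networking.h returns the folded 16-bit ones'-complement
 * sum, not yet complemented, so the caller complements it to obtain
 * the value stored in the header.  Kept under #if 0 since it is only
 * an example.
 */
#if 0
#include <stdint.h>

static uint16_t
ipv4_header_checksum(void *hdr, unsigned int hdrlen)
{
    unsigned char *h = hdr;

    /* Zero the checksum field (header bytes 10..11) before summing. */
    h[10] = 0;
    h[11] = 0;

    /* Sum the whole header, then complement the folded result. */
    return (uint16_t) ~__chksum_aarch64_simd(h, hdrlen);
}
#endif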