GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/include/asm/checksum.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/arm/include/asm/checksum.h
 *
 * IP checksum routines
 *
 * Copyright (C) Original authors of ../asm-i386/checksum.h
 * Copyright (C) 1996-1999 Russell King
 */
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

#include <linux/in6.h>
#include <linux/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
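
/*
 * Illustrative sketch (hypothetical fragments, not part of the original
 * header): a buffer can be checksummed in pieces by feeding each partial
 * result back in as "sum", folding the 32-bit accumulator with
 * csum_fold() only once at the end:
 *
 *	__wsum sum = csum_partial(frag1, len1, 0);
 *	sum = csum_partial(frag2, len2, sum);
 *	__sum16 csum = csum_fold(sum);
 */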

/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit (or,
 * even better, a 64-bit) boundary
 */

__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len);

__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
#define _HAVE_ARCH_CSUM_AND_COPY
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
        if (!access_ok(src, len))
                return 0;

        return csum_partial_copy_from_user(src, dst, len);
}
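
/*
 * Illustrative sketch (hypothetical caller and variables, not part of the
 * original header): copy a datagram in from user space while accumulating
 * its checksum in one pass.  By the current kernel convention a zero
 * return is treated as a fault (access_ok() rejected the pointer or the
 * copy faulted):
 *
 *	__wsum csum = csum_and_copy_from_user(ubuf, kbuf, len);
 *	if (!csum)
 *		return -EFAULT;
 */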

/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
        __asm__(
        "add    %0, %1, %1, ror #16     @ csum_fold"
        : "=r" (sum)
        : "r" (sum)
        : "cc");
        return (__force __sum16)(~(__force u32)sum >> 16);
}
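
/*
 * For reference, a portable C sketch of what the rotate above achieves
 * (an equivalent formulation, not the code that is actually built): the
 * two 16-bit halves of the 32-bit partial sum are added with end-around
 * carry, and the complement of the high half is the folded checksum.
 * ror32() is the helper from <linux/bitops.h>.
 *
 *	u32 tmp = (__force u32)sum;
 *	tmp += ror32(tmp, 16);
 *	return (__force __sum16)(~tmp >> 16);
 */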

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
 */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
        unsigned int tmp1;
        __wsum sum;

        __asm__ __volatile__(
        "ldr    %0, [%1], #4            @ ip_fast_csum          \n\
        ldr     %3, [%1], #4                                    \n\
        sub     %2, %2, #5                                      \n\
        adds    %0, %0, %3                                      \n\
        ldr     %3, [%1], #4                                    \n\
        adcs    %0, %0, %3                                      \n\
        ldr     %3, [%1], #4                                    \n\
1:      adcs    %0, %0, %3                                      \n\
        ldr     %3, [%1], #4                                    \n\
        tst     %2, #15                 @ do this carefully     \n\
        subne   %2, %2, #1              @ without destroying    \n\
        bne     1b                      @ the carry flag        \n\
        adcs    %0, %0, %3                                      \n\
        adc     %0, %0, #0"
        : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
        : "1" (iph), "2" (ihl)
        : "cc", "memory");
        return csum_fold(sum);
}
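
/*
 * Illustrative use (a sketch, assuming struct iphdr from <linux/ip.h>,
 * which this header does not include): a received IPv4 header whose
 * checksum field is intact sums to zero over its ihl 32-bit words, so
 * the usual receive-path test is:
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto drop;
 */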

static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
                   __u8 proto, __wsum sum)
{
        /*
         * lenprot holds len + proto in host byte order.  On little-endian
         * it is accumulated rotated right by 8 bits, which, once the sum
         * is folded to 16 bits, is equivalent to adding the byte-swapped
         * pseudo-header length/protocol words; big-endian adds it as is.
         */
        u32 lenprot = len + proto;

        /* When "sum" is a compile-time constant zero, one add is dropped. */
        if (__builtin_constant_p(sum) && sum == 0) {
                __asm__(
                "adds   %0, %1, %2      @ csum_tcpudp_nofold0   \n\t"
#ifdef __ARMEB__
                "adcs   %0, %0, %3                              \n\t"
#else
                "adcs   %0, %0, %3, ror #8                      \n\t"
#endif
                "adc    %0, %0, #0"
                : "=&r" (sum)
                : "r" (daddr), "r" (saddr), "r" (lenprot)
                : "cc");
        } else {
                __asm__(
                "adds   %0, %1, %2      @ csum_tcpudp_nofold    \n\t"
                "adcs   %0, %0, %3                              \n\t"
#ifdef __ARMEB__
                "adcs   %0, %0, %4                              \n\t"
#else
                "adcs   %0, %0, %4, ror #8                      \n\t"
#endif
                "adc    %0, %0, #0"
                : "=&r"(sum)
                : "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
                : "cc");
        }
        return sum;
}
/*
 * computes the checksum of the TCP/UDP pseudo-header;
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
                  __u8 proto, __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
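
/*
 * Illustrative sketch (hypothetical variables, not part of the original
 * header): a full UDP checksum is usually built by running csum_partial()
 * over the transport header plus payload and then mixing in the
 * pseudo-header with csum_tcpudp_magic().  The CSUM_MANGLED_0 fixup is
 * the usual UDP rule, since 0 in the header means "no checksum":
 *
 *	__wsum csum = csum_partial(uh, ulen, 0);
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, csum);
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */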


/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16
ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
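
/*
 * Illustrative sketch (hypothetical variables): an ICMP message is
 * checksummed over the whole header plus payload with no pseudo-header,
 * which is exactly what ip_compute_csum() provides; the checksum field
 * is zeroed first so it does not contribute to the sum:
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, icmp_len);
 */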

#define _HAVE_ARCH_IPV6_CSUM
extern __wsum
__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
                __be32 proto, __wsum sum);

static inline __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
                __u32 len, __u8 proto, __wsum sum)
{
        return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
                        htonl(proto), sum));
}
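
/*
 * Illustrative sketch (hypothetical variables): for an IPv6 transport
 * checksum the pseudo-header covers the full 128-bit addresses, the
 * upper-layer length and the next-header value:
 *
 *	__wsum csum = csum_partial(uh, ulen, 0);
 *	uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ulen,
 *				    IPPROTO_UDP, csum);
 */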
#endif