GitHub Repository: torvalds/linux
Path: blob/master/arch/mips/include/asm/checksum.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2001 Thiemo Seufer.
 * Copyright (C) 2002 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H

#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else

#include <linux/in6.h>

#include <linux/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
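/*
 * Illustrative sketch (not part of the original header): because
 * csum_partial() folds its "sum" argument into the result, a buffer can be
 * checksummed in pieces by feeding each partial result back in.  Only the
 * final fragment may have an odd length.  The helper and its parameter
 * names below are hypothetical.
 */
static inline __wsum example_csum_two_fragments(const void *frag1, int len1,
						const void *frag2, int len2)
{
	__wsum sum = csum_partial(frag1, len1, 0);	/* len1 must be even */

	return csum_partial(frag2, len2, sum);		/* last piece may be odd */
}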

__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len);
__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	might_fault();
	if (!access_ok(src, len))
		return 0;
	return __csum_partial_copy_from_user(src, dst, len);
}

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
	might_fault();
	if (!access_ok(dst, len))
		return 0;
	return __csum_partial_copy_to_user(src, dst, len);
}
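/*
 * Illustrative sketch (not part of the original header): the two helpers
 * above let callers copy data across the user/kernel boundary and compute
 * its checksum in a single pass.  As the code shows, a return value of 0 is
 * the failure indication used here (access_ok() rejected the range).  The
 * helper below is hypothetical.
 */
static inline int example_copy_and_csum_from_user(const void __user *ubuf,
						  void *kbuf, int len,
						  __wsum *csump)
{
	__wsum csum = csum_and_copy_from_user(ubuf, kbuf, len);

	if (!csum)		/* user range not accessible */
		return -1;	/* a real caller would return -EFAULT */

	*csump = csum;
	return 0;
}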

/*
 * the same as csum_partial, but copies from user space (but on MIPS
 * we have just one address space, so this is identical to the above)
 */
#define _HAVE_ARCH_CSUM_AND_COPY
__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len);
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	return __csum_partial_copy_nocheck(src, dst, len);
}

/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum += (sum << 16);
	csum = (__force __wsum)(sum < (__force u32)csum);
	sum >>= 16;
	sum += (__force u32)csum;

	return (__force __sum16)~sum;
}
#define csum_fold csum_fold
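/*
 * Worked example of the fold above (illustrative, not from the original
 * header), for the partial sum 0x8001ffff:
 *
 *   sum += sum << 16   ->  0x8001ffff + 0xffff0000 = 0x8000ffff, carry out
 *   csum  = carry      ->  1
 *   sum >>= 16         ->  0x8000
 *   sum += csum        ->  0x8001
 *   ~sum               ->  0x7ffe  (16-bit one's complement result)
 *
 * which matches folding by hand: 0x8001 + 0xffff = 0x18000 -> 0x8001,
 * complemented to 0x7ffe.
 */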

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * By Jorge Cwik <[email protected]>, adapted for linux by
 * Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	const unsigned int *word = iph;
	const unsigned int *stop = word + ihl;
	unsigned int csum;
	int carry;

	csum = word[0];
	csum += word[1];
	carry = (csum < word[1]);
	csum += carry;

	csum += word[2];
	carry = (csum < word[2]);
	csum += carry;

	csum += word[3];
	carry = (csum < word[3]);
	csum += carry;

	word += 4;
	do {
		csum += *word;
		carry = (csum < *word);
		csum += carry;
		word++;
	} while (word != stop);

	return csum_fold(csum);
}
#define ip_fast_csum ip_fast_csum
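/*
 * Illustrative sketch (not part of the original header): verifying a
 * received IPv4 header.  "iph" points at the header and "ihl" is the header
 * length in 32-bit words (the IHL field, at least 5); a header whose
 * checksum field is correct sums to 0.  The helper is hypothetical.
 */
static inline bool example_ipv4_header_ok(const void *iph, unsigned int ihl)
{
	return ip_fast_csum(iph, ihl) == 0;
}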

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum isum)
{
	const unsigned int sh32 = IS_ENABLED(CONFIG_64BIT) ? 32 : 0;
	unsigned long sum = (__force unsigned long)daddr;
	unsigned long tmp;
	__u32 osum;

	tmp = (__force unsigned long)saddr;
	sum += tmp;

	if (IS_ENABLED(CONFIG_32BIT))
		sum += sum < tmp;

	/*
	 * We know PROTO + LEN has the sign bit clear, so cast to a signed
	 * type to avoid an extraneous zero-extension where TMP is 64-bit.
	 */
	tmp = (__s32)(proto + len);
	tmp <<= IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? 8 : 0;
	sum += tmp;
	if (IS_ENABLED(CONFIG_32BIT))
		sum += sum < tmp;

	tmp = (__force unsigned long)isum;
	sum += tmp;

	if (IS_ENABLED(CONFIG_32BIT)) {
		sum += sum < tmp;
		osum = sum;
	} else if (IS_ENABLED(CONFIG_64BIT)) {
		tmp = sum << sh32;
		sum += tmp;
		osum = sum < tmp;
		osum += sum >> sh32;
	} else {
		BUILD_BUG();
	}

	return (__force __wsum)osum;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
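/*
 * Illustrative sketch (not part of the original header): this helper is
 * normally consumed through csum_tcpudp_magic(), i.e. the pseudo-header sum
 * produced above is folded together with the checksum of the transport
 * header plus payload (obtained from csum_partial()).  The helper and the
 * payload_csum name below are hypothetical.
 */
static inline __sum16 example_transport_checksum(__be32 saddr, __be32 daddr,
						 __u32 len, __u8 proto,
						 __wsum payload_csum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto,
					    payload_csum));
}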

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
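/*
 * Illustrative sketch (not part of the original header): the ICMP pattern.
 * The sender clears the 16-bit checksum field, computes ip_compute_csum()
 * over the whole message and stores the result, so the receiver's sum over
 * the entire message folds to 0.  "msg" and "csum_field" are hypothetical.
 */
static inline void example_icmp_fill_csum(void *msg, int len,
					  __sum16 *csum_field)
{
	*csum_field = 0;
	*csum_field = ip_compute_csum(msg, len);
}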

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	__wsum tmp;

	__asm__(
	"	.set	push		# csum_ipv6_magic\n"
	"	.set	noreorder	\n"
	"	.set	noat		\n"
	"	addu	%0, %5		# proto (long in network byte order)\n"
	"	sltu	$1, %0, %5	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %6		# csum\n"
	"	sltu	$1, %0, %6	\n"
	"	lw	%1, 0(%2)	# four words source address\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 0(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	addu	%0, $1		# Add final carry\n"
	"	.set	pop"
	: "=&r" (sum), "=&r" (tmp)
	: "r" (saddr), "r" (daddr),
	  "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
	: "memory");

	return csum_fold(sum);
}
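/*
 * Illustrative sketch (not part of the original header): validating a
 * received transport segment carried over IPv6.  The transport header plus
 * payload, including the sender's checksum field, is summed with
 * csum_partial() and combined with the pseudo-header above; a correct
 * packet yields 0.  The helper and its parameter names are hypothetical.
 */
static inline bool example_ipv6_transport_csum_ok(const struct in6_addr *saddr,
						  const struct in6_addr *daddr,
						  const void *th, __u32 len,
						  __u8 proto)
{
	__wsum sum = csum_partial(th, len, 0);

	return csum_ipv6_magic(saddr, daddr, len, proto, sum) == 0;
}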

#include <asm-generic/checksum.h>
#endif /* CONFIG_GENERIC_CSUM */

#endif /* _ASM_CHECKSUM_H */