GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/lib/csumpartial.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/csumpartial.S
 *
 *  Copyright (C) 1995-1998 Russell King
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.text

/*
 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 * Params  : r0 = buffer, r1 = len, r2 = checksum
 * Returns : r0 = new checksum
 */
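
/*
 * Rough C model of what this routine computes (an explanatory sketch,
 * not part of the kernel sources): a 16-bit one's-complement partial
 * sum accumulated with end-around carry, matching the adcs chains
 * below up to the final fold a caller performs with csum_fold().
 * Assumes a little-endian host; the helper name csum_partial_model
 * is hypothetical.
 *
 *	static unsigned int csum_partial_model(const unsigned char *src,
 *					       int len, unsigned int sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		while (len > 1) {		// 16-bit little-endian words
 *			acc += src[0] | (src[1] << 8);
 *			src += 2;
 *			len -= 2;
 *		}
 *		if (len)			// trailing odd byte
 *			acc += src[0];
 *		while (acc >> 32)		// end-around carry, cf. .Ldone
 *			acc = (acc & 0xffffffffULL) + (acc >> 32);
 *		return (unsigned int)acc;
 *	}
 */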

buf	.req	r0
len	.req	r1
sum	.req	r2
td0	.req	r3
td1	.req	r4		@ save before use
td2	.req	r5		@ save before use
td3	.req	lr

.Lzero:		mov	r0, sum
		add	sp, sp, #4		@ discard the buf saved at entry
		ldr	pc, [sp], #4		@ pop lr straight into pc: return

/*
 * Handle 0 to 7 bytes, with any alignment of the source
 * pointer.  Note that when we get here, C = 0
 */
.Lless8:	teq	len, #0			@ check for zero count
		beq	.Lzero

/* we must have at least one byte. */
		tst	buf, #1			@ odd address?
		movne	sum, sum, ror #8	@ rotate lanes; undone at .Ldone
		ldrbne	td0, [buf], #1
		subne	len, len, #1
		adcsne	sum, sum, td0, put_byte_1
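
/*
 * Explanatory note (not kernel text): put_byte_1, from asm/assembler.h,
 * shifts the loaded byte into bits 8-15 on little-endian builds.  An odd
 * start address means this byte sits in the high half of its 16-bit
 * word, so sum was rotated by 8 above to line the lanes up; .Ldone
 * rotates the result back.
 */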

.Lless4:	tst	len, #6
		beq	.Lless8_byte

		/* we are now half-word aligned */

.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
		ldrh	td0, [buf], #2
		sub	len, len, #2
#else
		ldrb	td0, [buf], #1
		ldrb	td3, [buf], #1
		sub	len, len, #2
#ifndef __ARMEB__
		orr	td0, td0, td3, lsl #8
#else
		orr	td0, td3, td0, lsl #8
#endif
#endif
		adcs	sum, sum, td0
		tst	len, #6
		bne	.Lless8_wordlp

.Lless8_byte:	tst	len, #1			@ odd number of bytes
		ldrbne	td0, [buf], #1		@ include last byte
		adcsne	sum, sum, td0, put_byte_0	@ update checksum

.Ldone:		adc	r0, sum, #0		@ collect up the last carry
		ldr	td0, [sp], #4		@ pop the buf saved at entry
		tst	td0, #1			@ check buffer alignment
		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
		ldr	pc, [sp], #4		@ return
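
/*
 * Explanatory note (not kernel text): the entry-time buf pointer was
 * pushed by ENTRY(csum_partial) below; its low bit records whether the
 * lanes were rotated on entry, and the ror #8 here compensates so the
 * caller sees the byte lanes it expects.
 */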

.Lnot_aligned:	tst	buf, #1			@ odd address
		ldrbne	td0, [buf], #1		@ make even
		subne	len, len, #1
		adcsne	sum, sum, td0, put_byte_1	@ update checksum

		tst	buf, #2			@ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
		ldrhne	td0, [buf], #2		@ make 32-bit aligned
		subne	len, len, #2
#else
		ldrbne	td0, [buf], #1
		ldrbne	ip, [buf], #1
		subne	len, len, #2
#ifndef __ARMEB__
		orrne	td0, td0, ip, lsl #8
#else
		orrne	td0, ip, td0, lsl #8
#endif
#endif
		adcsne	sum, sum, td0		@ update checksum
		ret	lr

ENTRY(csum_partial)
		stmfd	sp!, {buf, lr}
		cmp	len, #8			@ ensure that we have at least
		blo	.Lless8			@ 8 bytes to sum

		tst	buf, #1			@ odd start address?
		movne	sum, sum, ror #8	@ rotate lanes; undone at .Ldone

		adds	sum, sum, #0		@ C = 0
		tst	buf, #3			@ test source buffer alignment
		blne	.Lnot_aligned		@ align source, return here
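
/*
 * Explanatory note (not kernel text): adds sum, sum, #0 clears C so the
 * first adcs below starts without a stale carry; tst with a plain
 * immediate and bl leave C untouched, so the flag survives the call to
 * .Lnot_aligned and each adcs chains into the next.
 */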

1:		bics	ip, len, #31		@ len rounded down to 32-byte blocks
		beq	3f

		stmfd	sp!, {r4 - r5}
2:		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		sub	ip, ip, #32
		teq	ip, #0
		bne	2b
		ldmfd	sp!, {r4 - r5}
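
/*
 * Explanatory note (not kernel text): sub (no s suffix) does not touch
 * the flags, and teq with a plain immediate leaves C alone, so the carry
 * produced by the last adcs above survives into the next pass of the
 * 32-byte loop.
 */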

3:		tst	len, #0x1c		@ should not change C
		beq	.Lless4

4:		ldr	td0, [buf], #4
		sub	len, len, #4
		adcs	sum, sum, td0
		tst	len, #0x1c
		bne	4b
		b	.Lless4
ENDPROC(csum_partial)
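
/*
 * Hedged usage sketch (illustrative, not from this file): callers
 * typically fold the returned 32-bit partial sum down to the final
 * 16-bit internet checksum, as csum_fold() does.  In C terms:
 *
 *	static unsigned short fold16(unsigned int sum)
 *	{
 *		sum = (sum & 0xffff) + (sum >> 16);	// fold high half
 *		sum = (sum & 0xffff) + (sum >> 16);	// absorb any carry
 *		return (unsigned short)~sum;		// one's complement
 *	}
 *
 *	// e.g. checksum of buf[0..len): fold16(csum_partial(buf, len, 0))
 */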