Source: arch/arm/lib/csumpartial.S from the Linux kernel
(mirror: github.com/awilliam/linux-vfio, blob/master/arch/arm/lib/csumpartial.S)
1
/*
 *  linux/arch/arm/lib/csumpartial.S
 *
 *  Copyright (C) 1995-1998 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.text

/*
 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 * Params  : r0 = buffer, r1 = len, r2 = checksum
 * Returns : r0 = new checksum
 *
 * Computes the ones'-complement partial checksum of a buffer,
 * folding carries back in via the ADC carry chain.
 */

buf	.req	r0			@ source buffer pointer (advanced as we go)
len	.req	r1			@ bytes remaining
sum	.req	r2			@ running checksum accumulator
td0	.req	r3			@ scratch data register
td1	.req	r4			@ save before use (callee-saved)
td2	.req	r5			@ save before use (callee-saved)
td3	.req	lr			@ lr reused as scratch while return addr is stacked
/* len == 0 on entry to the short path: just return the sum unchanged.
 * The saved buf word is discarded; return address is popped from the stack. */
.Lzero:		mov	r0, sum
		add	sp, sp, #4
		ldr	pc, [sp], #4

/*
 * Handle 0 to 7 bytes, with any alignment of source and
 * destination pointers.  Note that when we get here, C = 0
 */
.Lless8:	teq	len, #0			@ check for zero count
		beq	.Lzero

		/* we must have at least one byte. */
		tst	buf, #1			@ odd address?
		movne	sum, sum, ror #8	@ pre-rotate; undone at .Ldone
		ldrneb	td0, [buf], #1
		subne	len, len, #1
		adcnes	sum, sum, td0, put_byte_1	@ add byte into correct lane, keep carry chain

.Lless4:	tst	len, #6			@ any halfwords left (len & 6)?
		beq	.Lless8_byte

		/* we are now half-word aligned */

.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
		ldrh	td0, [buf], #2
		sub	len, len, #2
#else
		/* pre-ARMv4 has no ldrh: assemble a halfword from two bytes */
		ldrb	td0, [buf], #1
		ldrb	td3, [buf], #1
		sub	len, len, #2
#ifndef __ARMEB__
		orr	td0, td0, td3, lsl #8
#else
		orr	td0, td3, td0, lsl #8
#endif
#endif
		adcs	sum, sum, td0		@ accumulate halfword, propagate carry
		tst	len, #6
		bne	.Lless8_wordlp

.Lless8_byte:	tst	len, #1			@ odd number of bytes
		ldrneb	td0, [buf], #1		@ include last byte
		adcnes	sum, sum, td0, put_byte_0	@ update checksum

.Ldone:		adc	r0, sum, #0		@ collect up the last carry
		ldr	td0, [sp], #4		@ original buf, stacked at entry
		tst	td0, #1			@ check buffer alignment
		movne	r0, r0, ror #8		@ rotate checksum by 8 bits (undo entry rotate)
		ldr	pc, [sp], #4		@ return

/* Align buf to a 32-bit boundary, folding the leading 1-3 bytes into sum.
 * Called with blne, so the carry chain (C flag) must be preserved throughout:
 * only adc/adcs touch it here.  Returns to caller via lr. */
.Lnot_aligned:	tst	buf, #1			@ odd address
		ldrneb	td0, [buf], #1		@ make even
		subne	len, len, #1
		adcnes	sum, sum, td0, put_byte_1	@ update checksum

		tst	buf, #2			@ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
		ldrneh	td0, [buf], #2		@ make 32-bit aligned
		subne	len, len, #2
#else
		ldrneb	td0, [buf], #1
		ldrneb	ip, [buf], #1
		subne	len, len, #2
#ifndef __ARMEB__
		orrne	td0, td0, ip, lsl #8
#else
		orrne	td0, ip, td0, lsl #8
#endif
#endif
		adcnes	sum, sum, td0		@ update checksum
		mov	pc, lr

ENTRY(csum_partial)
		stmfd	sp!, {buf, lr}		@ stash original buf for .Ldone's alignment test
		cmp	len, #8			@ Ensure that we have at least
		blo	.Lless8			@ 8 bytes to copy.

		tst	buf, #1
		movne	sum, sum, ror #8	@ odd start: rotate so bytes land in right lanes

		adds	sum, sum, #0		@ C = 0
		tst	buf, #3			@ Test destination alignment
		blne	.Lnot_aligned		@ align destination, return here

		/* main loop: 32 bytes per iteration while len allows */
1:		bics	ip, len, #31		@ ip = len rounded down to 32; C unchanged? no -
						@ bics clears C only via shifter, none here
		beq	3f

		stmfd	sp!, {r4 - r5}		@ td1/td2 are callee-saved
2:		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		sub	ip, ip, #32
		teq	ip, #0			@ teq preserves C, unlike cmp
		bne	2b
		ldmfd	sp!, {r4 - r5}

3:		tst	len, #0x1c		@ should not change C
		beq	.Lless4

		/* 4 to 28 bytes left: one word per iteration */
4:		ldr	td0, [buf], #4
		sub	len, len, #4
		adcs	sum, sum, td0
		tst	len, #0x1c
		bne	4b
		b	.Lless4
ENDPROC(csum_partial)