GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/lib/ldstfp.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Floating-point, VMX/Altivec and VSX loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <[email protected]>
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>
#include <linux/errno.h>

#define STKFRM	(PPC_MIN_STKFRM + 16)

/* Get the contents of frN into *p; N is in r3 and p is in r4. */
_GLOBAL(get_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	reg = 0
	.rept	32
	stfd	reg, 0(r4)
	b	2f
	reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

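/*
 * The bcl 20,31,1f / mflr r5 pair above is a position-independent way to
 * take the address of the table that the .rept block emits: bcl leaves the
 * address of the first stanza in LR, label 1 reads it back, adds the byte
 * offset already scaled into r3 by the rlwinm, restores the caller's LR and
 * bctr's into the stanza for frN, which does its stfd and then b 2f to put
 * the MSR back and return.  Each stanza is two 4-byte instructions (stfd
 * plus b), i.e. 8 bytes, which is what the rlwinm mask encodes.  A minimal
 * standalone C sketch of that offset arithmetic (illustration only, not
 * kernel code):
 *
 *	#include <assert.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		for (unsigned int n = 0; n < 32; n++) {
 *			unsigned int off = (n << 3) & 0xf8; // rlwinm r3,r3,3,0xf8
 *			assert(off == n * 8);               // one 8-byte stanza per FPR
 *			printf("FPR %2u -> stanza offset %3u\n", n, off);
 *		}
 *		return 0;
 *	}
 *
 * put_fpr below uses the same table layout with lfd in place of stfd.
 */
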
/* Put the contents of *p into frN; N is in r3 and p is in r4. */
_GLOBAL(put_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	reg = 0
	.rept	32
	lfd	reg, 0(r4)
	b	2f
	reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

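/*
 * These helpers are meant to be called from the instruction-emulation C
 * code; the register convention above (register number in r3, pointer in
 * r4) maps onto prototypes of roughly the shape below.  This is a hedged
 * sketch: the prototypes and the copy_fpr_via_mem() helper are assumptions
 * for illustration, not copies of a kernel header.
 *
 *	// Assumed prototypes: N in r3, pointer in r4.
 *	void get_fpr(int rn, double *p);
 *	void put_fpr(int rn, double *p);
 *
 *	// Hypothetical helper: copy one FPR to another through memory.
 *	static inline void copy_fpr_via_mem(int dst, int src)
 *	{
 *		double tmp;
 *
 *		get_fpr(src, &tmp);	// stfd frN -> tmp
 *		put_fpr(dst, &tmp);	// lfd  frM <- tmp
 *	}
 */
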
#ifdef CONFIG_ALTIVEC
/* Get the contents of vrN into *p; N is in r3 and p is in r4. */
_GLOBAL(get_vr)
	mflr	r0
	mfmsr	r6
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	reg = 0
	.rept	32
	stvx	reg, 0, r4
	b	2f
	reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

/* Put the contents of *p into vrN; N is in r3 and p is in r4. */
_GLOBAL(put_vr)
	mflr	r0
	mfmsr	r6
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	reg = 0
	.rept	32
	lvx	reg, 0, r4
	b	2f
	reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr
#endif /* CONFIG_ALTIVEC */

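/*
 * lvx and stvx ignore the low four bits of the effective address, so the
 * buffer handed to get_vr/put_vr needs to be a 16-byte, 16-byte-aligned
 * slot.  A hedged C sketch; the prototypes, the vmx_buf union and the
 * swap_vr_with_mem() helper are assumptions for illustration only:
 *
 *	#include <stdint.h>
 *
 *	// Assumed prototypes: N in r3, buffer address in r4.
 *	void get_vr(int rn, void *p);
 *	void put_vr(int rn, const void *p);
 *
 *	// 16-byte buffer with the alignment lvx/stvx expect.
 *	union vmx_buf {
 *		uint8_t  b[16];
 *		uint64_t d[2];
 *	} __attribute__((aligned(16)));
 *
 *	// Swap the contents of vrN with *buf through a temporary.
 *	static void swap_vr_with_mem(int rn, union vmx_buf *buf)
 *	{
 *		union vmx_buf tmp;
 *
 *		get_vr(rn, &tmp);	// stvx vrN -> tmp
 *		put_vr(rn, buf);	// lvx  vrN <- *buf
 *		*buf = tmp;
 *	}
 */
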
#ifdef CONFIG_VSX
/* Get the contents of vsN into vs0; N is in r3. */
_GLOBAL(get_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
	reg = 1
	.rept	63
	XXLOR(0,reg,reg)
	blr
	reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

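/*
 * get_vsr (and put_vsr below) use the same computed-branch trick as
 * get_fpr, but there are 64 VSX registers and therefore 64 eight-byte
 * stanzas, so the rlwinm mask widens from 0xf8 to 0x1f8 (byte offsets 0
 * through 504).  Entry 0 is just the blr/nop pair, since asking for vs0
 * needs no copy.  A short standalone check of the mask arithmetic
 * (illustration only, not kernel code):
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		for (unsigned int n = 0; n < 64; n++)
 *			assert(((n << 3) & 0x1f8) == n * 8); // rlwinm r3,r3,3,0x1f8
 *		return 0;
 *	}
 */
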
/* Put the contents of vs0 into vsN; N is in r3. */
_GLOBAL(put_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
	reg = 1
	.rept	63
	XXLOR(reg,0,0)
	blr
	reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. */
_GLOBAL(load_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	beq	cr7,1f
	STXVD2X(0,R1,R8)
1:	LXVD2X(0,R0,R4)
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)
#endif
	beq	cr7,4f
	bl	put_vsr
	LXVD2X(0,R1,R8)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	addi	r1,r1,STKFRM
	blr

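/*
 * LXVD2X/STXVD2X transfer the two 64-bit halves of the vector in
 * big-endian doubleword order, so on a little-endian kernel the image in
 * vs0 ends up with its doublewords in the opposite order from the one the
 * emulation code keeps in memory; the XXSWAPD under __LITTLE_ENDIAN__
 * swaps them back.  A standalone C analogue of that doubleword swap
 * (illustration only, not kernel code; xxswapd_image is a made-up name):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	// Swap the two 64-bit halves of a 16-byte image, as XXSWAPD(0,0) does.
 *	static void xxswapd_image(uint8_t img[16])
 *	{
 *		uint64_t a, b;
 *
 *		memcpy(&a, img, 8);
 *		memcpy(&b, img + 8, 8);
 *		memcpy(img, &b, 8);
 *		memcpy(img + 8, &a, 8);
 *	}
 *
 *	int main(void)
 *	{
 *		uint8_t img[16];
 *		int i;
 *
 *		for (i = 0; i < 16; i++)
 *			img[i] = (uint8_t)i;
 *		xxswapd_image(img);
 *		for (i = 0; i < 16; i++)
 *			printf("%02x%c", img[i], i == 15 ? '\n' : ' ');
 *		return 0;
 *	}
 */
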
/* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */
_GLOBAL(store_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	STXVD2X(0,R1,R8)
	bl	get_vsr
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)
#endif
	STXVD2X(0,R0,R4)
	LXVD2X(0,R1,R8)
	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
#endif /* CONFIG_VSX */

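/*
 * load_vsrn and store_vsrn are the entry points the VSX emulation path is
 * expected to call: they enable MSR_VSX themselves, park vs0 in the
 * 16-byte scratch slot at STKFRM-16 on their own frame, and move the data
 * through get_vsr/put_vsr above.  The declarations below are assumptions
 * inferred from the register usage (N in r3, pointer in r4), not copied
 * from a kernel header, and copy_vsr() is hypothetical:
 *
 *	// Assumed prototypes for the public VSX helpers.
 *	void load_vsrn(int vsr, const void *p);	// *p -> VSR[vsr]
 *	void store_vsrn(int vsr, void *p);	// VSR[vsr] -> *p
 *
 *	// Hypothetical round-trip: copy one VSX register via a 16-byte image.
 *	static void copy_vsr(int dst, int src)
 *	{
 *		unsigned char img[16];
 *
 *		store_vsrn(src, img);
 *		load_vsrn(dst, img);
 *	}
 */
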
/* Convert single-precision to double, without disturbing FPRs. */
/* conv_sp_to_dp(float *sp, double *dp) */
_GLOBAL(conv_sp_to_dp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)
	lfs	fr0, 0(r3)
	stfd	fr0, 0(r4)
	lfd	fr0, -16(r1)
	MTMSRD(r6)
	isync
	blr

/* Convert double-precision to single, without disturbing FPRs. */
/* conv_dp_to_sp(double *dp, float *sp) */
_GLOBAL(conv_dp_to_sp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)
	lfd	fr0, 0(r3)
	stfs	fr0, 0(r4)
	lfd	fr0, -16(r1)
	MTMSRD(r6)
	isync
	blr
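
/*
 * In plain C these two conversions are just casts; they are done here in
 * assembly so that the FPU's lfs/stfs perform the format conversion while
 * the kernel itself is built without floating-point code, borrowing only
 * fr0 and restoring it from the stack.  A standalone userspace analogue
 * (illustration only; rounding follows the C implementation rather than
 * the FPSCR):
 *
 *	#include <stdio.h>
 *
 *	// Analogue of conv_sp_to_dp: lfs widens to double, stfd stores it.
 *	static void sp_to_dp(const float *sp, double *dp)
 *	{
 *		*dp = (double)*sp;
 *	}
 *
 *	// Analogue of conv_dp_to_sp: lfd loads, stfs rounds to single.
 *	static void dp_to_sp(const double *dp, float *sp)
 *	{
 *		*sp = (float)*dp;
 *	}
 *
 *	int main(void)
 *	{
 *		float f = 1.5f;
 *		double d;
 *
 *		sp_to_dp(&f, &d);	// exact: 1.5 is representable in both
 *		printf("%g\n", d);
 *		d = 0.1;
 *		dp_to_sp(&d, &f);	// rounded to the nearest single
 *		printf("%.9g\n", f);
 *		return 0;
 *	}
 */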