GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/platforms/powermac/cache.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low-level cache management functions
 * used for sleep and CPU speed changes on Apple machines.
 * (In fact the only thing that is Apple-specific is that we assume
 * that we can read from ROM at physical address 0xfff00000.)
 *
 * Copyright (C) 2004 Paul Mackerras ([email protected]) and
 * Benjamin Herrenschmidt ([email protected])
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/feature-fixups.h>

/*
 * Flush and disable all data caches (dL1, L2, L3). This is used
 * when going to sleep, when doing a PMU based cpufreq transition,
 * or when "offlining" a CPU on SMP machines. This code is
 * over-paranoid, but I've had enough issues with various CPU revs
 * and bugs that I decided it was worth being over-cautious.
 */

_GLOBAL(flush_disable_caches)
#ifndef CONFIG_PPC_BOOK3S_32
	blr
#else
BEGIN_FTR_SECTION
	b	flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1

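/* Dispatch: 7450-family CPUs (CPU_FTR_SPEC7450) take the 745x path
 * above, other CPUs with an L2CR (750/7400/7410) take the 75x path,
 * and anything else only has an L1 to flush.
 */
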
/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
	mflr	r10

	/* Turn off EE and DR in MSR */
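	/* Clearing EE keeps interrupts away while the caches are in flux;
	 * clearing DR turns off data address translation, so the flush
	 * loops below operate on physical addresses directly (including
	 * the ROM at physical 0xfff00000).
	 */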
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop DST streams */
BEGIN_FTR_SECTION
	PPC_DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Stop DPM */
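	/* HID0[DPM] is bit 11; the wrap-around rlwinm mask 12..10 below
	 * clears just that bit. Dynamic power management is presumably
	 * turned off so no unit can nap mid-flush; the saved value in r8
	 * is restored at the end.
	 */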
	mfspr	r8,SPRN_HID0		/* Save SPRN_HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Disp-flush L1. We have a weird problem here that I never
	 * totally figured out. On 750FX, using the ROM for the flush
	 * results in a non-working flush. We use that workaround for
	 * now until I finally understand what's going on. --BenH
	 */

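	/* This is a "displacement flush": read more sequential cache lines
	 * than the cache can hold, so every dirty line is evicted and
	 * written back. 0x4000 loads at a 32-byte stride sweep 512Kb, many
	 * times the size of the dL1.
	 */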
	/* ROM base by default */
	lis	r4,0xfff0
	mfpvr	r3
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x7000
	bne+	1f
	/* RAM base on 750FX */
	li	r4,0
1:	li	r4,0x4000
	mtctr	r4
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* Disable / invalidate / enable L1 data */
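	/* DCI and ICFI are the L1 flash-invalidate bits: both caches are
	 * turned off, re-enabled together with the invalidate bits set,
	 * and the xori then clears the invalidate bits again.
	 */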
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
	mtspr	SPRN_HID0,r3
	sync
	isync
	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3,(HID0_DCI|HID0_ICFI)
	mtspr	SPRN_HID0,r3
	sync

	/* Save the current L2CR value in r5 */
	mfspr	r5,SPRN_L2CR
	/* Set to data-only (pre-745x bit) */
	oris	r3,r5,L2CR_L2DO@h
	b	2f
	/* When disabling L2, code must be in L1 */
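	/* The 2f/3f/1b branch weave below walks execution through these
	 * lines once, pulling the whole aligned block into the instruction
	 * cache, so nothing needs to be fetched through the L2 while its
	 * state is changing.
	 */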
	.balign	32
1:	mtspr	SPRN_L2CR,r3
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	/* disp-flush L2. The interesting thing here is that the L2 can be
	 * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
	 * but that is probably fine. We disp-flush over 4Mb to be safe
	 */
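	/* lis r4,2 gives 0x20000 iterations of 32 bytes = 4Mb: one pass of
	 * loads to displace the L2, then one pass of dcbf over the same
	 * range to force out whatever the loads left behind.
	 */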
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	dcbf	0,r4
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* now disable L2 */
	rlwinm	r5,r5,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r5
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
	oris	r4,r5,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync

	/* Wait for the invalidation to complete: bit 31 is L2CR[L2IP],
	 * which stays set while the invalidate is in progress */
1:	mfspr	r3,SPRN_L2CR
	rlwinm.	r0,r3,0,31,31
	bne	1b

	/* Clear L2I */
	xoris	r4,r4,L2CR_L2I@h
	sync
	mtspr	SPRN_L2CR,r4
	sync

	/* now disable the L1 data and instruction caches */
	mfspr	r0,SPRN_HID0
	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
	mtspr	SPRN_HID0,r0
	sync
	isync

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mfspr	r0,SPRN_HID0
	rlwimi	r0,r8,0,11,11		/* Turn back HID0[DPM] */
	mtspr	SPRN_HID0,r0
	sync

	/* restore DR and EE */
	sync
	mtmsr	r11
	isync

	mtlr	r10
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_75x)

/* This code is for 745x processors */
flush_disable_745x:
	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop prefetch streams */
	PPC_DSSALL
	sync

	/* Disable L2 prefetching */
	mfspr	r0,SPRN_MSSCR0
	rlwinm	r0,r0,0,0,29
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
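	/* The eight dcbf's to address 0 below look like an errata
	 * workaround, presumably making sure the prefetch engines have
	 * fully stopped before the flush proper begins.
	 */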
	lis	r4,0
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4

	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few Mb to "help". The problem isn't totally
	 * fixed by this in theory, but at least, in practice, I couldn't reproduce
	 * it even with a big hammer...
	 */
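
	/* First pass: read the first 4Mb (0x20000 lines of 32 bytes) to
	 * displace dirty lines out of the caches. */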
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwz	r0,0(r4)
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
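	/* The 745x L1 data cache is 8-way set-associative with 128 sets,
	 * and the low byte of LDSTCR locks individual ways. Starting from
	 * 0xfe, exactly one way is left unlocked at a time, 128 lines are
	 * read from ROM to displace that way, and the unlocked position is
	 * then rotated through all eight ways.
	 */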
	/* Flush and disable the L1 data cache */
	mfspr	r6,SPRN_LDSTCR
	lis	r3,0xfff0		/* read from ROM for displacement flush */
	li	r4,0xfe			/* start with only way 0 unlocked */
	li	r5,128			/* 128 lines in each way */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
2:	lwz	r0,0(r3)		/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30		/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff			/* all done? */
	bne	1b
	/* now unlock the L1 data cache */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync

	/* Flush the L2 cache using the hardware assist */
	mfspr	r3,SPRN_L2CR
	cmpwi	r3,0			/* check if it is enabled first (L2E is the sign bit) */
	bge	4f
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
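	/* Setting both L2DO and L2IO blocks new data and instruction
	 * allocations into the L2 while the hardware flush runs. */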
	b	2f
	/* When disabling/locking L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r0		/* lock the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0		/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR		/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync
	rlwinm	r3,r3,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r3		/* disable the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	oris	r4,r3,L2CR_L2I@h	/* global invalidate */
	mtspr	SPRN_L2CR,r4
	sync
	isync
1:	mfspr	r4,SPRN_L2CR		/* on 745x the hardware clears L2I itself */
	andis.	r0,r4,L2CR_L2I@h
	bne	1b
	sync

BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
4:	mfspr	r3,SPRN_L3CR
	cmpwi	r3,0			/* check if it is enabled (L3E is the sign bit) */
	bge	6f
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	sync
	mtspr	SPRN_L3CR,r0		/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	sync
	mtspr	SPRN_L3CR,r0		/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR		/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	sync
	mtspr	SPRN_L3CR,r3		/* disable the L3 cache */
	sync
	ori	r4,r3,L3CR_L3I
	mtspr	SPRN_L3CR,r4
1:	mfspr	r4,SPRN_L3CR
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0		/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11			/* restore DR and EE */
	isync
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_745x)
#endif /* CONFIG_PPC_BOOK3S_32 */