GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/microblaze/kernel/cpu/cache.c
/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <[email protected]>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <[email protected]>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

/* Enable/disable helpers for cores that have the msrset/msrclr instructions */
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory");
}

/* The same operations done by read-modify-write of rmsr through r12, for
 * cores without the msrset/msrclr instructions (see the PVR2_USE_MSR_INSTR
 * check in microblaze_cache_init below).
 */
static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_ICE)			\
			: "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_ICE)			\
			: "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory", "r12");
}


/* Helper macro for computing the limits of cache range loops.
 *
 * The end address may be unaligned, which is fine for the C implementation;
 * the ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
} while (0);
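
/*
 * Worked example (hypothetical values): with start = 0x1005, end = 0x9010,
 * cache_line_length = 16 and cache_size = 0x8000, the macro gives
 * end = min(0x1005 + 0x8000, 0x9010) = 0x9005 and start = 0x1005 & ~15 =
 * 0x1000: the range is clamped to at most one full cache size and the
 * start is aligned down to a line boundary.
 */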

/*
 * Helper macro to loop over the given cache_size/line_length and
 * execute 'op' on each cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;		\
					bgtid	%0, 1b;		\
					addk	%0, %0, %1;	\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0);
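
/*
 * Illustration (hypothetical sizes): for a 32 KB cache with 16-byte lines,
 * the loop starts 'op' at byte offset 32768 - 16 and counts down by 16 per
 * iteration; since bgtid executes the addk in its delay slot, offset 0 is
 * still processed before the loop falls through.
 */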

/* Used for wdc.flush/clear, which can take an offset in rB; plain wdc or
 * wic cannot.
 *
 * The start address is cache-line aligned. The end address may not be:
 * if it is aligned, subtract one cache-line length so that the next
 * cacheline is not flushed/invalidated; if it is not, align it down,
 * since the whole line is flushed/invalidated anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;\
	count = end - start;						\
	WARN_ON(count < 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;		\
					bgtid	%1, 1b;		\
					addk	%1, %1, %2;	\
					" : : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0);
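
/*
 * Example (hypothetical values): with line_length = 32, an already aligned
 * end of 0x2020 is pulled back to 0x2000, and an unaligned end of 0x2013 is
 * aligned down to 0x2000 as well; in both cases the line at 0x2000 is the
 * last one operated on and the line at 0x2020 is left untouched.
 */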

/* Only the first operand of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	int volatile temp = 0;	/* %0 is overwritten by cmpu below */	\
	int align = ~(line_length - 1);					\
	end = ((end & align) == end) ? end - line_length : end & align;\
	WARN_ON(end < start);	/* end - start is unsigned */		\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;		\
					cmpu	%0, %1, %2;	\
					bgtid	%0, 1b;		\
					addk	%1, %1, %3;	\
				" : : "r" (temp), "r" (start), "r" (end),\
				"r" (line_length) : "memory");		\
} while (0);
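
/*
 * Note on the loop above: it walks upward - 'op' touches the line at
 * start, cmpu compares start against the adjusted end, and bgtid's delay
 * slot advances start by one line, so the line containing end is the last
 * one processed.
 */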

/* Define ASM_LOOP to use the assembly loop macros above; leave it
 * undefined to fall back to the plain C loops in each function below.
 */
#define ASM_LOOP

static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}
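
/*
 * All the *_msr_irq/_nomsr_irq variants follow the pattern above:
 * interrupts stay masked while the cache is switched off around the line
 * loop, so nothing else can run against a disabled cache, and both are
 * restored before returning.
 */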

static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}

/* FIXME This blindly invalidates, as expected, but it must not be called
 * on noMMU from microblaze_cache_init below.
 *
 * MS: a noMMU kernel won't boot if plain wdc is used.
 * The likely reason is that data the kernel still needs gets discarded.
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
				: : "r" (i));
#endif
}

/* struct for wb caches and for wt caches */
struct scache *mbc;
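
/*
 * Usage sketch (illustrative, not code from this file): the rest of the
 * kernel reaches these operations through the mbc pointer via the wrapper
 * macros in asm/cacheflush.h (included above), along the lines of
 *
 *	if (mbc->dflr)
 *		mbc->dflr(start, end);
 */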

/* new wb cache model */
static const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Differs from wb_msr only in the ie, id, de, dd functions */
static const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Old wt cache model: disables irqs and turns the cache off */
static const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

static const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New wt cache model for newer MicroBlaze versions */
static const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

static const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

/* CPU version codes for 7.20.a/7.20.d - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

#define INFO(s)	printk(KERN_INFO "cache: " s "\n");

void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			INFO("wb_msr");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_msr_noirq");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				INFO("wt_msr");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			INFO("wb_nomsr");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_nomsr_noirq");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				INFO("wt_nomsr");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}
	/* FIXME Invalidation is done in U-Boot.
	 * WT cache: data is already written to main memory.
	 * WB cache: discarding data on noMMU caused the kernel not to boot.
	 */
	/* invalidate_dcache(); */
	enable_dcache();

	invalidate_icache();
	enable_icache();
}
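
/*
 * Selection summary: PVR2_USE_MSR_INSTR picks between the msr and nomsr
 * helper sets, dcache_wb picks the write-back vs write-through model, and
 * for write-through caches a version code of 7.20.a or newer picks the
 * noirq variants.
 */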