GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/sh/mm/pmb.c

/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
        unsigned long vpn;
        unsigned long ppn;
        unsigned long flags;
        unsigned long size;

        raw_spinlock_t lock;

        /*
         * 0 .. NR_PMB_ENTRIES for specific entry selection, or
         * PMB_NO_ENTRY to search for a free one
         */
        int entry;

        /* Adjacent entry link for contiguous multi-entry mappings */
        struct pmb_entry *link;
};

static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};
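
/*
 * The table above is ordered largest-first so that the mapping loops
 * below can greedily pick the biggest entry size that still fits; a
 * 144MB request, for example, decomposes into one 128MB entry followed
 * by one 16MB entry.
 */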

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
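
/*
 * Each PMB entry is programmed through a pair of memory-mapped array
 * slots: mk_pmb_addr() yields the address array register and
 * mk_pmb_data() the data array register for a given entry number.
 * With the SH-4A definitions from <asm/mmu.h> (PMB_ADDR 0xf6100000,
 * PMB_DATA 0xf7100000, PMB_E_SHIFT 8, for instance), entry 3 would be
 * programmed through 0xf6100300 and 0xf7100300 respectively.
 */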

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
        unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
        flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= PMB_C;
#endif

        return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
        unsigned long pmb_flags = 0;
        u64 flags = pgprot_val(prot);

        if (flags & _PAGE_CACHABLE)
                pmb_flags |= PMB_C;
        if (flags & _PAGE_WT)
                pmb_flags |= PMB_WT | PMB_UB;

        return pmb_flags;
}
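
/*
 * Only the caching attributes survive this conversion; a PMB entry
 * carries no per-page protection bits, which is why pmb_prot_valid()
 * below refuses anything marked _PAGE_USER.
 */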

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
        return (b->vpn == (a->vpn + a->size)) &&
               (b->ppn == (a->ppn + a->size)) &&
               (b->flags == a->flags);
}

static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
                               unsigned long size)
{
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe, *iter;
                unsigned long span;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * See if VPN and PPN are bounded by an existing mapping.
                 */
                if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
                        continue;
                if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
                        continue;

                /*
                 * Now see if we're in range of a simple mapping.
                 */
                if (size <= pmbe->size) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }

                span = pmbe->size;

                /*
                 * Finally for sizes that involve compound mappings, walk
                 * the chain.
                 */
                for (iter = pmbe->link; iter; iter = iter->link)
                        span += iter->size;

                /*
                 * Nothing else to do if the range requirements are met.
                 */
                if (size <= span) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }
        }

        read_unlock(&pmb_rwlock);
        return false;
}
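
/*
 * Example: with a 128MB entry at 0x80000000 linked to a following 16MB
 * entry, a 144MB request starting at 0x80000000 is reported as already
 * mapped once the link walk accumulates 128MB + 16MB of span.
 */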

static bool pmb_size_valid(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return true;

        return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
        return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
        return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return pmb_sizes[i].flag;

        return 0;
}

static int pmb_alloc_entry(void)
{
        int pos;

        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
        if (pos >= 0 && pos < NR_PMB_ENTRIES)
                __set_bit(pos, pmb_map);
        else
                pos = -ENOSPC;

        return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        unsigned long irqflags;
        void *ret = NULL;
        int pos;

        write_lock_irqsave(&pmb_rwlock, irqflags);

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (unlikely(pos < 0)) {
                        ret = ERR_PTR(pos);
                        goto out;
                }
        } else {
                if (__test_and_set_bit(entry, pmb_map)) {
                        ret = ERR_PTR(-ENOSPC);
                        goto out;
                }

                pos = entry;
        }

        write_unlock_irqrestore(&pmb_rwlock, irqflags);

        pmbe = &pmb_entry_list[pos];

        memset(pmbe, 0, sizeof(struct pmb_entry));

        raw_spin_lock_init(&pmbe->lock);

        pmbe->vpn = vpn;
        pmbe->ppn = ppn;
        pmbe->flags = flags;
        pmbe->entry = pos;

        return pmbe;

out:
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
        return ret;
}
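
/*
 * pmb_alloc() serves two callers: pmb_bolt_mapping() passes
 * PMB_NO_ENTRY and takes whatever slot the bitmap search returns,
 * while pmb_synchronize() requests a specific slot so that the
 * software state lines up with an entry the boot loader has already
 * programmed in hardware.
 */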

static void pmb_free(struct pmb_entry *pmbe)
{
        __clear_bit(pmbe->entry, pmb_map);

        pmbe->entry = PMB_NO_ENTRY;
        pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        jump_to_uncached();

        /* Set V-bit */
        __raw_writel(pmbe->vpn | PMB_V, addr);
        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

        back_to_cached();
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;
        unsigned long addr_val, data_val;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        addr_val = __raw_readl(addr);
        data_val = __raw_readl(data);

        /* Clear V-bit */
        writel_uncached(addr_val & ~PMB_V, addr);
        writel_uncached(data_val & ~PMB_V, data);
}
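
/*
 * Setting an entry rewrites both array slots while executing from the
 * uncached copy of the kernel (jump_to_uncached()), since the update
 * may affect the very translation the cached kernel text is running
 * through. Clearing only needs to drop the V bit, so a pair of
 * uncached writes suffices.
 */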

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pmbe->lock, flags);
        __set_pmb_entry(pmbe);
        raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                     unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long orig_addr, orig_size;
        unsigned long flags, pmb_flags;
        int i, mapped;

        if (size < SZ_16M)
                return -EINVAL;
        if (!pmb_addr_valid(vaddr, size))
                return -EFAULT;
        if (pmb_mapping_exists(vaddr, phys, size))
                return 0;

        orig_addr = vaddr;
        orig_size = size;

        flush_tlb_kernel_range(vaddr, vaddr + size);

        pmb_flags = pgprot_to_pmb_flags(prot);
        pmbp = NULL;

        do {
                for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                        if (size < pmb_sizes[i].size)
                                continue;

                        pmbe = pmb_alloc(vaddr, phys, pmb_flags |
                                         pmb_sizes[i].flag, PMB_NO_ENTRY);
                        if (IS_ERR(pmbe)) {
                                pmb_unmap_entry(pmbp, mapped);
                                return PTR_ERR(pmbe);
                        }

                        raw_spin_lock_irqsave(&pmbe->lock, flags);

                        pmbe->size = pmb_sizes[i].size;

                        __set_pmb_entry(pmbe);

                        phys += pmbe->size;
                        vaddr += pmbe->size;
                        size -= pmbe->size;

                        /*
                         * Link adjacent entries that span multiple PMB
                         * entries for easier tear-down.
                         */
                        if (likely(pmbp)) {
                                raw_spin_lock_nested(&pmbp->lock,
                                                     SINGLE_DEPTH_NESTING);
                                pmbp->link = pmbe;
                                raw_spin_unlock(&pmbp->lock);
                        }

                        pmbp = pmbe;

                        /*
                         * Instead of trying smaller sizes on every
                         * iteration (even if we succeed in allocating
                         * space), try using pmb_sizes[i].size again.
                         */
                        i--;
                        mapped++;

                        raw_spin_unlock_irqrestore(&pmbe->lock, flags);
                }
        } while (size >= SZ_16M);

        flush_cache_vmap(orig_addr, orig_addr + orig_size);

        return 0;
}
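
/*
 * Example: bolting 144MB takes a single pass through the size table;
 * the 128MB slot is tried first (the i-- retry keeps reusing a size
 * for as long as it fits), then the remaining 16MB is covered by a
 * 16MB entry, and the two entries are linked for later tear-down.
 */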

void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
                               pgprot_t prot, void *caller)
{
        unsigned long vaddr;
        phys_addr_t offset, last_addr;
        phys_addr_t align_mask;
        unsigned long aligned;
        struct vm_struct *area;
        int i, ret;

        if (!pmb_iomapping_enabled)
                return NULL;

        /*
         * Small mappings need to go through the TLB.
         */
        if (size < SZ_16M)
                return ERR_PTR(-EINVAL);
        if (!pmb_prot_valid(prot))
                return ERR_PTR(-EINVAL);

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (size >= pmb_sizes[i].size)
                        break;

        last_addr = phys + size;
        align_mask = ~(pmb_sizes[i].size - 1);
        offset = phys & ~align_mask;
        phys &= align_mask;
        aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

        /*
         * XXX: This should really start from uncached_end, but this
         * causes the MMU to reset, so for now we restrict it to the
         * 0xb000...0xc000 range.
         */
        area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
                                    P3SEG, caller);
        if (!area)
                return NULL;

        area->phys_addr = phys;
        vaddr = (unsigned long)area->addr;

        ret = pmb_bolt_mapping(vaddr, phys, size, prot);
        if (unlikely(ret != 0))
                return ERR_PTR(ret);

        return (void __iomem *)(offset + (char *)vaddr);
}
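
/*
 * Illustration of the alignment handling above: remapping 16MB at
 * physical 0x10400000 selects the 16MB entry size, rounds phys down
 * to 0x10000000, records the 0x400000 offset, and reserves a 32MB
 * virtual area for the rounded range; the cookie handed back points
 * offset bytes into that area.
 */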

int pmb_unmap(void __iomem *addr)
{
        struct pmb_entry *pmbe = NULL;
        unsigned long vaddr = (unsigned long __force)addr;
        int i, found = 0;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == vaddr) {
                                found = 1;
                                break;
                        }
                }
        }

        read_unlock(&pmb_rwlock);

        if (found) {
                pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
                return 0;
        }

        return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling __clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                __clear_pmb_entry(pmbe);

                flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        unsigned long flags;

        if (unlikely(!pmbe))
                return;

        write_lock_irqsave(&pmb_rwlock, flags);
        __pmb_unmap_entry(pmbe, depth);
        write_unlock_irqrestore(&pmb_rwlock, flags);
}
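
/*
 * The depth argument bounds how far the link chain is torn down:
 * pmb_unmap() passes NR_PMB_ENTRIES to release a full chain, while
 * pmb_merge() passes just the number of tail entries that were folded
 * into the resized head.
 */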

static void __init pmb_notify(void)
{
        int i;

        pr_info("PMB: boot mappings:\n");

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
        }

        read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
        struct pmb_entry *pmbp = NULL;
        int i, j;

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned long irqflags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        writel_uncached(addr_val & ~PMB_V, addr);
                        writel_uncached(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
                        data_val &= ~PMB_CACHE_MASK;
                        data_val |= pmb_cache_flags();

                        writel_uncached(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                raw_spin_lock_irqsave(&pmbe->lock, irqflags);

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                if (pmbp) {
                        raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
                        /*
                         * Compare the previous entry against the current one to
                         * see if the entries span a contiguous mapping. If so,
                         * setup the entry links accordingly. Compound mappings
                         * are later coalesced.
                         */
                        if (pmb_can_merge(pmbp, pmbe))
                                pmbp->link = pmbe;
                        raw_spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
        }
}

static void __init pmb_merge(struct pmb_entry *head)
{
        unsigned long span, newsize;
        struct pmb_entry *tail;
        int i = 1, depth = 0;

        span = newsize = head->size;

        tail = head->link;
        while (tail) {
                span += tail->size;

                if (pmb_size_valid(span)) {
                        newsize = span;
                        depth = i;
                }

                /* This is the end of the line.. */
                if (!tail->link)
                        break;

                tail = tail->link;
                i++;
        }

        /*
         * The merged page size must be valid.
         */
        if (!depth || !pmb_size_valid(newsize))
                return;

        head->flags &= ~PMB_SZ_MASK;
        head->flags |= pmb_size_to_flags(newsize);

        head->size = newsize;

        __pmb_unmap_entry(head->link, depth);
        __set_pmb_entry(head);
}
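
/*
 * Example: a chain of four contiguous 16MB entries gives spans of
 * 32MB, 48MB and 64MB as the walk advances; only 64MB is a valid PMB
 * size, so newsize becomes 64MB at depth 3, the head is rewritten as
 * a single 64MB entry, and the three tail entries are unmapped.
 */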

static void __init pmb_coalesce(void)
{
        unsigned long flags;
        int i;

        write_lock_irqsave(&pmb_rwlock, flags);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * We're only interested in compound mappings
                 */
                if (!pmbe->link)
                        continue;

                /*
                 * Nothing to do if it already uses the largest possible
                 * page size.
                 */
                if (pmbe->size == SZ_512M)
                        continue;

                pmb_merge(pmbe);
        }

        write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
        int i;

        /*
         * If the uncached mapping was constructed by the kernel, it will
         * already be a reasonable size.
         */
        if (uncached_size == SZ_16M)
                return;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;
                unsigned long flags;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                if (pmbe->vpn != uncached_start)
                        continue;

                /*
                 * Found it, now resize it.
                 */
                raw_spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size = SZ_16M;
                pmbe->flags &= ~PMB_SZ_MASK;
                pmbe->flags |= pmb_size_to_flags(pmbe->size);

                uncached_resize(pmbe->size);

                __set_pmb_entry(pmbe);

                raw_spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        read_unlock(&pmb_rwlock);
}
#endif

static int __init early_pmb(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "iomap"))
                pmb_iomapping_enabled = 1;

        return 0;
}
early_param("pmb", early_pmb);
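
/*
 * Booting with "pmb=iomap" on the kernel command line sets
 * pmb_iomapping_enabled, which is what allows pmb_remap_caller() to
 * service large remap requests; without it the function bails out
 * early and the regular TLB path is used instead.
 */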

void __init pmb_init(void)
{
        /* Synchronize software state */
        pmb_synchronize();

        /* Attempt to combine compound mappings */
        pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
        /* Resize initial mappings, if necessary */
        pmb_resize();
#endif

        /* Log them */
        pmb_notify();

        writel_uncached(0, PMB_IRMCR);

        /* Flush out the TLB */
        local_flush_tlb_all();
        ctrl_barrier();
}

bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
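
/*
 * PASCR.SE is the address space extension bit: when it reads back as
 * zero the CPU is still running in legacy 29-bit physical addressing
 * mode rather than 32-bit extended mode.
 */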

static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                         (size == PMB_SZ_64M)  ? " 64MB":
                         (size == PMB_SZ_128M) ? "128MB":
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = pmb_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;

        return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static void pmb_syscore_resume(void)
{
        struct pmb_entry *pmbe;
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        set_pmb_entry(pmbe);
                }
        }

        read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
        .resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
        register_syscore_ops(&pmb_syscore_ops);
        return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif