GitHub Repository: torvalds/linux
Path: blob/master/arch/mips/mm/ioremap.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

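/*
 * IS_LOW512() - true if a physical address fits entirely below 512 MB and
 * can therefore be reached through the unmapped, uncached KSEG1 segment.
 * IS_KSEG1() - true if a virtual address already lies inside KSEG1.
 */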
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

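/*
 * walk_system_ram_range() callback: returns 1 if any page in the range is
 * valid, non-reserved RAM, i.e. memory the page allocator may hand out.
 */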
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                               void *arg)
{
        unsigned long i;

        for (i = 0; i < nr_pages; i++) {
                if (pfn_valid(start_pfn + i) &&
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;
        }

        return 0;
}

/*
 * ioremap_prot - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
                pgprot_t prot)
{
        unsigned long flags = pgprot_val(prot) & _CACHE_MASK;
        unsigned long offset, pfn, last_pfn;
        struct vm_struct *area;
        phys_addr_t last_addr;
        unsigned long vaddr;
        void __iomem *cpu_addr;

        cpu_addr = plat_ioremap(phys_addr, size, flags);
        if (cpu_addr)
                return cpu_addr;

        phys_addr = fixup_bigphys_addr(phys_addr, size);

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Map uncached objects in the low 512mb of address space using KSEG1,
         * otherwise map using page tables.
         */
        if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
            flags == _CACHE_UNCACHED)
                return (void __iomem *) CKSEG1ADDR(phys_addr);

        /* Early remaps should use the unmapped regions until VM is available */
        if (WARN_ON_ONCE(!slab_is_available()))
                return NULL;

        /*
         * Don't allow anybody to remap RAM that may be allocated by the page
         * allocator, since that could lead to races & data clobbering.
         */
        pfn = PFN_DOWN(phys_addr);
        last_pfn = PFN_DOWN(last_addr);
        if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                                  __ioremap_check_ram) == 1) {
                WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
                          &phys_addr, &last_addr);
                return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;

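        /*
         * Wire the reserved vmalloc-area range to the physical range,
         * keeping the caller's CCA and adding the valid, global, read and
         * write page bits.
         */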
        flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
                        __pgprot(flags))) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

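/*
 * iounmap - unmap a cookie returned by ioremap_prot()
 *
 * The platform gets a chance to handle the address via plat_iounmap().
 * KSEG1 cookies from the low-512MB fast path never had page tables behind
 * them, so only mappings living in the vmalloc area are vunmap()'d.
 */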
void iounmap(const volatile void __iomem *addr)
{
        if (!plat_iounmap(addr) && !IS_KSEG1(addr))
                vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
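/*
 * Usage sketch (illustrative only, not part of ioremap.c): how a MIPS
 * driver might map a device register window with ioremap()/iounmap().
 * EXAMPLE_DEV_BASE, EXAMPLE_DEV_SIZE, EXAMPLE_CTRL_REG and the example_*
 * functions are hypothetical names; the addresses are placeholder values.
 */
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_DEV_BASE        0x1f000000UL    /* hypothetical bus address */
#define EXAMPLE_DEV_SIZE        0x1000UL        /* hypothetical window size */
#define EXAMPLE_CTRL_REG        0x00            /* hypothetical register offset */

static void __iomem *example_regs;

static int example_map_device(void)
{
        /*
         * On MIPS, ioremap() asks for an uncached mapping; for a
         * low-512MB bus address like this one, ioremap_prot() above can
         * satisfy it with a KSEG1 cookie and no page tables at all.
         */
        example_regs = ioremap(EXAMPLE_DEV_BASE, EXAMPLE_DEV_SIZE);
        if (!example_regs)
                return -ENOMEM;

        /* MMIO must go through the accessors, never plain pointer loads. */
        writel(0x1, example_regs + EXAMPLE_CTRL_REG);

        return 0;
}

static void example_unmap_device(void)
{
        /* iounmap() is a no-op for KSEG1 cookies, vunmap() otherwise. */
        iounmap(example_regs);
        example_regs = NULL;
}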