GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/blackfin/kernel/cplb-nompu/cplbinit.c
/*
 * Blackfin CPLB initialization
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mem_map.h>

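/*
 * Per-CPU instruction and data CPLB tables, plus boundary tables that
 * describe the CPLB attributes to use for each address range.  The
 * *_nr_bounds counters record how many boundary entries are in use.
 */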
struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;

int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;

struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
struct cplb_boundary icplb_bounds[9] PDT_ATTR;

int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;

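/*
 * Install the fixed ("locked") CPLB entries for one CPU: the kernel
 * image, the optional XIP flash region and the core's own L1 SRAM.
 * Remaining table slots are cleared.
 */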
void __init generate_cplb_tables_cpu(unsigned int cpu)
{
	int i_d, i_i;
	unsigned long addr;

	struct cplb_entry *d_tbl = dcplb_tbl[cpu];
	struct cplb_entry *i_tbl = icplb_tbl[cpu];

	printk(KERN_INFO "NOMPU: setting up cplb tables\n");

	i_d = i_i = 0;

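	/*
	 * With CONFIG_DEBUG_HUNT_FOR_ZERO, a 1KB entry is locked in at
	 * address 0 with SDRAM_OOPS attributes so that NULL pointer
	 * dereferences can be caught instead of silently touching the
	 * start of SDRAM.
	 */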
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
	/* Set up the zero page. */
	d_tbl[i_d].addr = 0;
	d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
	i_tbl[i_i].addr = 0;
	i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
#endif

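	/*
	 * memory_start marks the end of the kernel image and its static
	 * data, so locking 4MB pages from address 0 up to memory_start
	 * covers all of the kernel with a handful of entries.
	 */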
	/* Cover kernel memory with 4M pages. */
	addr = 0;

	for (; addr < memory_start; addr += 4 * 1024 * 1024) {
		d_tbl[i_d].addr = addr;
		d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
		i_tbl[i_i].addr = addr;
		i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
	}

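	/*
	 * For XIP (ROM) kernels, CONFIG_ROM_BASE is rounded down to a 4MB
	 * boundary so the entry added here is page-size aligned.
	 */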
#ifdef CONFIG_ROMKERNEL
	/* Cover kernel XIP flash area */
	addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
	d_tbl[i_d].addr = addr;
	d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
	i_tbl[i_i].addr = addr;
	i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
#endif

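	/*
	 * Each core gets locked entries for its own on-chip L1 SRAM; on
	 * SMP parts the second core's L1 is reached via the COREB_*
	 * addresses.
	 */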
	/* Cover L1 memory. One 4M area for code and data each is enough. */
	if (cpu == 0) {
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#ifdef CONFIG_SMP
	else {
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = COREB_L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = COREB_L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#endif
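	/*
	 * Everything installed above stays locked; entries from
	 * first_switched_* onward may be replaced at run time when a CPLB
	 * miss occurs.
	 */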
	first_switched_dcplb = i_d;
	first_switched_icplb = i_i;

	BUG_ON(first_switched_dcplb > MAX_CPLBS);
	BUG_ON(first_switched_icplb > MAX_CPLBS);

	while (i_d < MAX_CPLBS)
		d_tbl[i_d++].data = 0;
	while (i_i < MAX_CPLBS)
		i_tbl[i_i++].data = 0;
}

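/*
 * Build the boundary tables consulted on a CPLB miss.  Entries are in
 * ascending order of end address; ->data holds the CPLB attributes to
 * use for the range, and a value of 0 marks an addressing hole that must
 * not be mapped.
 */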
void __init generate_cplb_tables_all(void)
{
	unsigned long uncached_end;
	int i_d, i_i;

	i_d = 0;
	/* Normal RAM, including MTD FS. */
#ifdef CONFIG_MTD_UCLINUX
	uncached_end = memory_mtd_start + mtd_size;
#else
	uncached_end = memory_end;
#endif
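	/*
	 * uncached_end is where the normal, cacheable SDRAM region ends
	 * and the DMA uncached region begins; with CONFIG_MTD_UCLINUX the
	 * MTD filesystem image is included in the cacheable region.
	 */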
	/*
	 * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
	 * so that we don't have to use 4kB pages and cause CPLB thrashing
	 */
	if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
		dcplb_bounds[i_d].eaddr = uncached_end;
	else
		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
	/* DMA uncached region. */
	if (DMA_UNCACHED_REGION) {
		dcplb_bounds[i_d].eaddr = _ramend;
		dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
	}
	if (_ramend != physical_mem_end) {
		/* Reserved memory. */
		dcplb_bounds[i_d].eaddr = physical_mem_end;
		dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
					    SDRAM_DGENERIC : SDRAM_DNON_CHBL);
	}
	/* Addressing hole up to the async bank. */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
	dcplb_bounds[i_d++].data = 0;
	/* ASYNC banks. */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	dcplb_bounds[i_d++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
	dcplb_bounds[i_d++].data = 0;
	/* BootROM -- largest one should be less than 1 meg. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START;
		dcplb_bounds[i_d++].data = 0;
		/* L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
		dcplb_bounds[i_d++].data = L2_DMEMORY;
	}
	dcplb_nr_bounds = i_d;
	BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));

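	/*
	 * The instruction-side bounds mirror the data-side layout above,
	 * using the instruction variants of the CPLB attributes.
	 */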
	i_i = 0;
	/* Normal RAM, including MTD FS. */
	icplb_bounds[i_i].eaddr = uncached_end;
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
	if (_ramend != physical_mem_end) {
		/* DMA uncached region. */
		if (DMA_UNCACHED_REGION) {
			/* Normally this hole is caught by the async below. */
			icplb_bounds[i_i].eaddr = _ramend;
			icplb_bounds[i_i++].data = 0;
		}
		/* Reserved memory. */
		icplb_bounds[i_i].eaddr = physical_mem_end;
		icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
					    SDRAM_IGENERIC : SDRAM_INON_CHBL);
	}
	/* Addressing hole up to the async bank. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
	icplb_bounds[i_i++].data = 0;
	/* ASYNC banks. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	icplb_bounds[i_i++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START;
	icplb_bounds[i_i++].data = 0;
	/* BootROM -- largest one should be less than 1 meg. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;

	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START;
		icplb_bounds[i_i++].data = 0;
		/* L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
		icplb_bounds[i_i++].data = L2_IMEMORY;
	}
	icplb_nr_bounds = i_i;
	BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
}