// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Modifications by Kumar Gala ([email protected]) to support
 * E500 Book E processors.
 *
 * Copyright 2004,2010 Freescale Semiconductor, Inc.
 *
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *   -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Modifications by Paul Mackerras (PowerMac) ([email protected])
 * and Cort Dougan (PReP) ([email protected])
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/paca.h>

#include <mm/mmu_decl.h>
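
/*
 * Boot-time CAM (TLB1) bookkeeping: tlbcam_index counts the entries in
 * use, TLBCAM holds the MAS register images to load, and tlbcam_addrs
 * records the virtual/physical range covered by each entry.
 */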
unsigned int tlbcam_index;

struct tlbcam TLBCAM[NUM_TLBCAMS];

static struct {
        unsigned long start;
        unsigned long limit;
        phys_addr_t phys;
} tlbcam_addrs[NUM_TLBCAMS];

#ifdef CONFIG_PPC_85xx
/*
 * Return PA for this VA if it is mapped by a CAM, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
        int b;
        for (b = 0; b < tlbcam_index; ++b)
                if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
                        return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
        return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
        int b;
        for (b = 0; b < tlbcam_index; ++b)
                if (pa >= tlbcam_addrs[b].phys &&
                    pa < (tlbcam_addrs[b].limit - tlbcam_addrs[b].start) + tlbcam_addrs[b].phys)
                        return tlbcam_addrs[b].start + (pa - tlbcam_addrs[b].phys);
        return 0;
}
#endif

/*
 * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
 * in particular size must be a power of 4 between 4k and the max supported by
 * an implementation; max may further be limited by what can be represented in
 * an unsigned long (for example, 32-bit implementations cannot support a 4GB
 * size).
 */
static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
                      unsigned long size, unsigned long flags, unsigned int pid)
{
        unsigned int tsize;

        tsize = __ilog2(size) - 10;

#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
        if ((flags & _PAGE_NO_CACHE) == 0)
                flags |= _PAGE_COHERENT;
#endif

        TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
        TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
        TLBCAM[index].MAS2 = virt & PAGE_MASK;

        TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
        TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
        TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
        TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
        TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;

        TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
        TLBCAM[index].MAS3 |= (flags & _PAGE_WRITE) ? MAS3_SW : 0;
        if (mmu_has_feature(MMU_FTR_BIG_PHYS))
                TLBCAM[index].MAS7 = (u64)phys >> 32;

        /* Below is unlikely -- only for large user pages or similar */
        if (!is_kernel_addr(virt)) {
                TLBCAM[index].MAS3 |= MAS3_UR;
                TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
                TLBCAM[index].MAS3 |= (flags & _PAGE_WRITE) ? MAS3_UW : 0;
        } else {
                TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
        }

        tlbcam_addrs[index].start = virt;
        tlbcam_addrs[index].limit = virt + size - 1;
        tlbcam_addrs[index].phys = phys;
}
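
/*
 * Largest CAM size usable to map "ram" bytes at virt/phys: no bigger than
 * the region, no bigger than the common alignment of the two addresses,
 * and clamped to the biggest page size the MMU supports (power-of-4 sizes
 * only on MAV 1.0 parts).
 */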
static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
                                 phys_addr_t phys)
{
        unsigned int camsize = __ilog2(ram);
        unsigned int align = __ffs(virt | phys);
        unsigned long max_cam;

        if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
                /* Convert (4^max) kB to (2^max) bytes */
                max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
                camsize &= ~1U;
                align &= ~1U;
        } else {
                /* Convert (2^max) kB to (2^max) bytes */
                max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
        }

        if (camsize > align)
                camsize = align;
        if (camsize > max_cam)
                camsize = max_cam;

        return 1UL << camsize;
}
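
/*
 * Cover "ram" bytes starting at virt/phys with up to max_cam_idx CAM
 * entries.  With strict kernel RWX, the text range up to _sinittext is
 * mapped first (read-only/executable once init completes) and the rest of
 * lowmem follows as read-write.  A dryrun only computes how much could be
 * mapped, without touching the TLB.
 */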
static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
                                          unsigned long ram, int max_cam_idx,
                                          bool dryrun, bool init)
{
        int i;
        unsigned long amount_mapped = 0;
        unsigned long boundary;

        if (strict_kernel_rwx_enabled())
                boundary = (unsigned long)(_sinittext - _stext);
        else
                boundary = ram;

        /* Calculate CAM values */
        for (i = 0; boundary && i < max_cam_idx; i++) {
                unsigned long cam_sz;
                pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;

                cam_sz = calc_cam_sz(boundary, virt, phys);
                if (!dryrun)
                        settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);

                boundary -= cam_sz;
                amount_mapped += cam_sz;
                virt += cam_sz;
                phys += cam_sz;
        }
        for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
                unsigned long cam_sz;
                pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;

                cam_sz = calc_cam_sz(ram, virt, phys);
                if (!dryrun)
                        settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);

                ram -= cam_sz;
                amount_mapped += cam_sz;
                virt += cam_sz;
                phys += cam_sz;
        }

        if (dryrun)
                return amount_mapped;

        if (init) {
                loadcam_multi(0, i, max_cam_idx);
                tlbcam_index = i;
        } else {
                loadcam_multi(0, i, 0);
                WARN_ON(i > tlbcam_index);
        }

#ifdef CONFIG_PPC64
        get_paca()->tcd.esel_next = i;
        get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
        get_paca()->tcd.esel_first = i;
#endif

        return amount_mapped;
}
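
/* Map lowmem: "ram" bytes at PAGE_OFFSET backed by memstart_addr. */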
unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
{
        unsigned long virt = PAGE_OFFSET;
        phys_addr_t phys = memstart_addr;

        return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
}

#ifdef CONFIG_PPC32

#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
#endif
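
/* Lowmem is already covered by the boot CAM entries; report how much. */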
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
        return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
}
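
/*
 * Flash-invalidate the instruction cache and clear any cache-line locks
 * (L1CSR1[ICFI|ICLFR]), then isync() so later fetches see the result.
 */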
void flush_instruction_cache(void)
{
        unsigned long tmp;

        tmp = mfspr(SPRN_L1CSR1);
        tmp |= L1CSR1_ICFI | L1CSR1_ICLFR;
        mtspr(SPRN_L1CSR1, tmp);
        isync();
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
        flush_instruction_cache();
}

static unsigned long __init tlbcam_sz(int idx)
{
        return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
}
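
/*
 * Shrink lowmem to what the CAM entries can cover and print the final
 * mapping.  The entries are (re)written from address space 1 so that the
 * AS0 translations set up by the boot code can be safely replaced.
 */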
void __init adjust_total_lowmem(void)
{
        unsigned long ram;
        int i;

        /* adjust lowmem size to __max_low_memory */
        ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);

        i = switch_to_as1();
        __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
        restore_to_as0(i, 0, NULL, 1);

        pr_info("Memory CAM mapping: ");
        for (i = 0; i < tlbcam_index - 1; i++)
                pr_cont("%lu/", tlbcam_sz(i) >> 20);
        pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
                (unsigned int)((total_lowmem - __max_low_memory) >> 20));

        memblock_set_current_limit(memstart_addr + __max_low_memory);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
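/*
 * Re-map lowmem with final protections: kernel text becomes read-only
 * executable, everything else stays read-write non-executable.
 */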
int mmu_mark_rodata_ro(void)
{
        unsigned long remapped;

        remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);

        if (WARN_ON(__max_low_memory != remapped))
                return -EINVAL;

        return 0;
}
#endif

int mmu_mark_initmem_nx(void)
{
        /* Everything is done in mmu_mark_rodata_ro() */
        return 0;
}
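
/*
 * Restrict early memblock allocations to the range we know is mapped:
 * the initial 64M CAM entry set up by the early boot code.
 */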
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
{
        phys_addr_t limit = first_memblock_base + first_memblock_size;

        /* 64M mapped initially according to head_fsl_booke.S */
        memblock_set_current_limit(min_t(u64, limit, 0x04000000));
}

#ifdef CONFIG_RELOCATABLE
int __initdata is_second_reloc;
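/*
 * Called after each relocation pass.  On the first pass the real
 * memstart_addr is not yet known; if it turns out to differ from the
 * address PAGE_OFFSET currently maps, a second relocation is triggered
 * and this function runs again, this time only fixing virt_phys_offset
 * and finishing KASLR setup.
 */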
notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
{
        unsigned long base = kernstart_virt_addr;
        phys_addr_t size;

        kernstart_addr = start;
        if (is_second_reloc) {
                virt_phys_offset = PAGE_OFFSET - memstart_addr;
                kaslr_late_init();
                return;
        }

        /*
         * Relocatable kernel support based on processing of dynamic
         * relocation entries. Before we get the real memstart_addr,
         * we will compute the virt_phys_offset like this:
         * virt_phys_offset = stext.run - kernstart_addr
         *
         * stext.run = (KERNELBASE & ~0x3ffffff) +
         *                              (kernstart_addr & 0x3ffffff)
         * When we relocate, we have:
         *
         *      (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
         *
         * hence:
         *      virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
         *                              (kernstart_addr & ~0x3ffffff)
         */
        start &= ~0x3ffffff;
        base &= ~0x3ffffff;
        virt_phys_offset = base - start;
        early_get_first_memblock_info(__va(dt_ptr), &size);
        /*
         * We now get the memstart_addr, then we should check if this
         * address is the same as what the PAGE_OFFSET map to now. If
         * not we have to change the map of PAGE_OFFSET to memstart_addr
         * and do a second relocation.
         */
        if (start != memstart_addr) {
                int n;
                long offset = start - memstart_addr;

                is_second_reloc = 1;
                n = switch_to_as1();
                /* map a 64M area for the second relocation */
                if (memstart_addr > start)
                        map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
                                        false, true);
                else
                        map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
                                             0x4000000, CONFIG_LOWMEM_CAM_NUM,
                                             false, true);
                restore_to_as0(n, offset, __va(dt_ptr), 1);
                /* We should never reach here */
                panic("Relocation error");
        }

        kaslr_early_init(__va(dt_ptr), size);
}
#endif
#endif