GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/mm/book3s32/mmu.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification. This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Modifications by Paul Mackerras (PowerMac) ([email protected])
 * and Cort Dougan (PReP) ([email protected])
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/text-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};

static struct hash_pte __initdata *Hash = (struct hash_pte *)early_hash;
static unsigned long __initdata Hash_size, Hash_mask;
static unsigned int __initdata hash_mb, hash_mb2;
unsigned long __initdata _SDR1;

struct ppc_bat BATS[8][2];      /* 8 pairs of IBAT, DBAT */

static struct batrange {        /* stores address ranges mapped by BATs */
        unsigned long start;
        unsigned long limit;
        phys_addr_t phys;
} bat_addrs[8];

#ifdef CONFIG_SMP
unsigned long mmu_hash_lock;
#endif

/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
        int b;
        for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
                if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
                        return bat_addrs[b].phys + (va - bat_addrs[b].start);
        return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
        int b;
        for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
                if (pa >= bat_addrs[b].phys &&
                    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
                        return bat_addrs[b].start + (pa - bat_addrs[b].phys);
        return 0;
}

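/*
 * Find the index of the first unused BAT pair, i.e. one whose DBAT
 * valid bits (Vs/Vp) are both clear.  Returns -1 if all 4 pairs
 * (or 8 with MMU_FTR_USE_HIGH_BATS) are in use.
 */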
int __init find_free_bat(void)
{
        int b;
        int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

        for (b = 0; b < n; b++) {
                struct ppc_bat *bat = BATS[b];

                if (!(bat[1].batu & 3))
                        return b;
        }
        return -1;
}

/*
 * This function calculates the size of the larger block usable to map the
 * beginning of an area based on the start address and size of that area:
 * - max block size is 256M on 6xx.
 * - base address must be aligned to the block size. So the maximum block size
 *   is identified by the lowest bit set to 1 in the base address (for instance
 *   if base is 0x16000000, max size is 0x02000000).
 * - block size has to be a power of two. This is calculated by finding the
 *   highest bit set to 1.
 */
unsigned int bat_block_size(unsigned long base, unsigned long top)
{
        unsigned int max_size = SZ_256M;
        unsigned int base_shift = (ffs(base) - 1) & 31;
        unsigned int block_shift = (fls(top - base) - 1) & 31;

        return min3(max_size, 1U << base_shift, 1U << block_shift);
}

/*
 * Set up one of the IBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
                    unsigned int size, pgprot_t prot)
{
        unsigned int bl = (size >> 17) - 1;
        int wimgxpp;
        struct ppc_bat *bat = BATS[index];
        unsigned long flags = pgprot_val(prot);

        if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
                flags &= ~_PAGE_COHERENT;

        wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
        bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
        bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
        if (!is_kernel_addr(virt))
                bat[0].batu |= 1;       /* Vp = 1 */
}

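/* Invalidate one of the IBAT register pairs. */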
static void clearibat(int index)
{
        struct ppc_bat *bat = BATS[index];

        bat[0].batu = 0;
        bat[0].batl = 0;
}

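/*
 * Map as much of [base, top) with DBATs as free BATs and the
 * block-size/alignment constraints allow, and return the first
 * address left unmapped (== top when everything fit).
 */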
static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
        int idx;

        while ((idx = find_free_bat()) != -1 && base != top) {
                unsigned int size = bat_block_size(base, top);

                if (size < 128 << 10)
                        break;
                setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
                base += size;
        }

        return base;
}

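/*
 * Cover kernel text (up to _einittext, rounded up to a power of two)
 * with IBAT0, then map RAM with DBATs.  With strict kernel RWX enabled,
 * the mapping is split at the strict RWX boundary so that text/rodata
 * and RW data end up in separate BATs.  Returns the first address not
 * covered by BATs.
 */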
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
        unsigned long done;
        unsigned long border = (unsigned long)__srwx_boundary - PAGE_OFFSET;
        unsigned long size;

        size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET);
        setibat(0, PAGE_OFFSET, 0, size, PAGE_KERNEL_X);

        if (debug_pagealloc_enabled_or_kfence()) {
                pr_debug_once("Read-Write memory mapped without BATs\n");
                if (base >= border)
                        return base;
                if (top >= border)
                        top = border;
        }

        if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
                return __mmu_mapin_ram(base, top);

        done = __mmu_mapin_ram(base, border);
        if (done != border)
                return done;

        return __mmu_mapin_ram(border, top);
}

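/*
 * Return true when @addr falls in a 256M segment that may contain
 * module text; those segments must keep execute permission below.
 */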
static bool is_module_segment(unsigned long addr)
{
        if (!IS_ENABLED(CONFIG_EXECMEM))
                return false;
        if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
                return false;
        if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
                return false;
        return true;
}

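/*
 * Shrink the IBAT coverage to kernel text proper (up to _etext) so that
 * freed init text is no longer executable, then set the no-execute bit
 * in every segment register from TASK_SIZE up, except for segments that
 * may contain modules.
 */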
int mmu_mark_initmem_nx(void)
{
        int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
        int i;
        unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
        unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
        unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
        unsigned long size;

        for (i = 0; i < nb - 1 && base < top;) {
                size = bat_block_size(base, top);
                setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
                base += size;
        }
        if (base < top) {
                size = bat_block_size(base, top);
                if ((top - base) > size) {
                        size <<= 1;
                        if (strict_kernel_rwx_enabled() && base + size > border)
                                pr_warn("Some RW data is getting mapped X. "
                                        "Adjust CONFIG_DATA_SHIFT to avoid that.\n");
                }
                setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
                base += size;
        }
        for (; i < nb; i++)
                clearibat(i);

        update_bats();

        BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, SZ_256M) < TASK_SIZE);

        for (i = TASK_SIZE >> 28; i < 16; i++) {
                /* Do not set NX on VM space for modules */
                if (is_module_segment(i << 28))
                        continue;

                mtsr(mfsr(i << 28) | 0x10000000, i << 28);
        }
        return 0;
}

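/*
 * Downgrade the DBATs that map addresses below __end_rodata from RW to
 * RX so that text and rodata become read-only through the block maps.
 */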
int mmu_mark_rodata_ro(void)
{
        int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
        int i;

        for (i = 0; i < nb; i++) {
                struct ppc_bat *bat = BATS[i];

                if (bat_addrs[i].start < (unsigned long)__end_rodata)
                        bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
        }

        update_bats();

        return 0;
}

/*
 * Set up one of the D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
                   unsigned int size, pgprot_t prot)
{
        unsigned int bl;
        int wimgxpp;
        struct ppc_bat *bat;
        unsigned long flags = pgprot_val(prot);

        if (index == -1)
                index = find_free_bat();
        if (index == -1) {
                pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
                       (unsigned long long)phys);
                return;
        }
        bat = BATS[index];

        if ((flags & _PAGE_NO_CACHE) ||
            (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
                flags &= ~_PAGE_COHERENT;

        bl = (size >> 17) - 1;
        /* Do DBAT first */
        wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
                           | _PAGE_COHERENT | _PAGE_GUARDED);
        wimgxpp |= (flags & _PAGE_WRITE) ? BPP_RW : BPP_RX;
        bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
        bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
        if (!is_kernel_addr(virt))
                bat[1].batu |= 1;       /* Vp = 1 */
        if (flags & _PAGE_GUARDED) {
                /* G bit must be zero in IBATs */
                flags &= ~_PAGE_EXEC;
        }

        bat_addrs[index].start = virt;
        bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
        bat_addrs[index].phys = phys;
}

/*
 * Preload a translation in the hash table
 */
static void hash_preload(struct mm_struct *mm, unsigned long ea)
{
        pmd_t *pmd;

        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;
        pmd = pmd_off(mm, ea);
        if (!pmd_none(*pmd))
                add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                        pte_t *ptep)
{
        /*
         * We don't need to worry about _PAGE_PRESENT here because we are
         * called with either mm->page_table_lock held or ptl lock held
         */

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

        /* We have to test for regs NULL since init will get here first thing at boot */
        if (!current->thread.regs)
                return;

        /* We also avoid filling the hash if not coming from a fault */
        if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
                return;

        hash_preload(vma->vm_mm, address);
}

/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
        unsigned int n_hpteg, lg_n_hpteg;

        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;

        if (ppc_md.progress)
                ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE   6               /* 64 bytes per HPTEG */
#define SDR1_LOW_BITS   ((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG     1024            /* min 64kB hash table */

        /*
         * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
         * This is less than the recommended amount, but then
         * Linux ain't AIX.
         */
        n_hpteg = total_memory / (PAGE_SIZE * 8);
        if (n_hpteg < MIN_N_HPTEG)
                n_hpteg = MIN_N_HPTEG;
        lg_n_hpteg = __ilog2(n_hpteg);
        if (n_hpteg & (n_hpteg - 1)) {
                ++lg_n_hpteg;           /* round up if not power of 2 */
                n_hpteg = 1 << lg_n_hpteg;
        }
        Hash_size = n_hpteg << LG_HPTEG_SIZE;

        /*
         * Find some memory for the hash table.
         */
        if (ppc_md.progress)
                ppc_md.progress("hash:find piece", 0x322);
        Hash = memblock_alloc_or_panic(Hash_size, Hash_size);
        _SDR1 = __pa(Hash) | SDR1_LOW_BITS;

        pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
                (unsigned long long)(total_memory >> 20), Hash_size >> 10);

        Hash_mask = n_hpteg - 1;
        hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
        if (lg_n_hpteg > 16)
                hash_mb2 = 16 - LG_HPTEG_SIZE;
}

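/*
 * Patch the hash-table base and mask computed by MMU_init_hw() into the
 * hash lookup and flush code in hashtable.S.
 */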
void __init MMU_init_hw_patch(void)
{
        unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
        unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;

        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;

        if (ppc_md.progress)
                ppc_md.progress("hash:patch", 0x345);
        if (ppc_md.progress)
                ppc_md.progress("hash:done", 0x205);

        /* WARNING: Make sure nothing can trigger a KASAN check past this point */

        /*
         * Patch up the instructions in hashtable.S:create_hpte
         */
        modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
        modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
        modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
        modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
        modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

        /*
         * Patch up the instructions in hashtable.S:flush_hash_page
         */
        modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
        modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
        modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
        modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}

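/*
 * Cap early memblock allocations to what the initial BAT mapping can
 * cover: at most the first 256M of RAM.
 */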
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
{
        /*
         * We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors
         */
        BUG_ON(first_memblock_base != 0);

        memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
}

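/* Report the hash-table geometry chosen by MMU_init_hw(). */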
void __init print_system_hash_info(void)
{
        pr_info("Hash_size = 0x%lx\n", Hash_size);
        if (Hash_mask)
                pr_info("Hash_mask = 0x%lx\n", Hash_mask);
}

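/* No hash-MMU setup is needed this early; empty hook for the common init path. */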
void __init early_init_mmu(void)
{
}