// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}

/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_pages_to_ram(page, 1);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct ptdesc instead of separately kmalloced struct. Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[3] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
	LIST_HEAD_INIT(ptable_list[2]),
};

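/*
 * Bookkeeping lives in the page's own struct ptdesc: pt_list links the page
 * into the per-type free list above, and pt_index (accessed through
 * PD_MARKBITS) serves as a bitmap with one bit per table slot that is still
 * free in the page.
 */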
#define PD_PTABLE(ptdesc) ((ptable_desc *)&(virt_to_ptdesc((void *)(ptdesc))->pt_list))
#define PD_PTDESC(ptable) (list_entry(ptable, struct ptdesc, pt_list))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PTDESC(dp)->pt_index)

static const int ptable_shift[3] = {
	7+2, /* PGD */
	7+2, /* PMD */
	6+2, /* PTE */
};

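/*
 * From ptable_shift: PGD/PMD pointer tables are 512 bytes and PTE tables are
 * 256 bytes, so with the 4 KiB pages normally used by the Motorola MMU a page
 * holds 8 pointer tables or 16 page tables; ptable_mask() is the corresponding
 * "all slots free" bitmap.
 */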
#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)

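/*
 * Register a table that was set up at boot time with the allocator above:
 * the first table seen in a page marks all of that page's slots free and adds
 * the page to the free list, then the slot holding this particular table is
 * marked as in use.
 */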
void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long pt_addr = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - pt_addr)/ptable_size(type));

	dp = PD_PTABLE(pt_addr);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the ptdesc so it's possible to free that ptdesc */
	__ClearPageReserved(ptdesc_page(PD_PTDESC(dp)));
	init_page_count(ptdesc_page(PD_PTDESC(dp)));

	return;
}

void *get_pointer_table(struct mm_struct *mm, int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a ptdesc allocated for the purpose. Each
	 * ptdesc can hold 8 pointer tables. The ptdesc is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		struct ptdesc *ptdesc;
		ptable_desc *new;
		void *pt_addr;

		ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
		if (!ptdesc)
			return NULL;

		pt_addr = ptdesc_address(ptdesc);

		switch (type) {
		case TABLE_PTE:
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS for not having
			 * SMP.
			 */
			pagetable_pte_ctor(mm, ptdesc);
			break;
		case TABLE_PMD:
			pagetable_pmd_ctor(mm, ptdesc);
			break;
		case TABLE_PGD:
			pagetable_pgd_ctor(ptdesc);
			break;
		}

		mmu_page_ctor(pt_addr);

		new = PD_PTABLE(pt_addr);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)pt_addr;
	}

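	/*
	 * Otherwise take a table from the page at the head of the free list:
	 * find the lowest set bit in the free-slot bitmap, clear it, and
	 * convert the bit position into a byte offset within the page.
	 */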
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return ptdesc_address(PD_PTDESC(dp)) + off;
}

int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long pt_addr = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - pt_addr)/ptable_size(type));

	dp = PD_PTABLE(pt_addr);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in ptdesc are free, free ptdesc */
		list_del(dp);
		mmu_page_dtor((void *)pt_addr);
		pagetable_dtor_free(virt_to_ptdesc((void *)pt_addr));
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

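/*
 * Hand out kernel PTE tables (PTRS_PER_PTE entries each) from the page that
 * last_pte_table points into. When the cursor crosses a page boundary
 * (PAGE_ALIGNED), a fresh low-memory page is allocated from memblock, cleared
 * and made non-cacheable before tables are carved out of it.
 */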
static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (PAGE_ALIGNED(last_pte_table)) {
		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}

static pmd_t *last_pmd_table __initdata = NULL;

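/*
 * Same scheme for pointer (pmd) tables: start from the last pointer table set
 * up in head.S, keep carving PTRS_PER_PMD-entry tables out of that page, and
 * switch to a fresh non-cacheable memblock page once it is full.
 */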
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (PAGE_ALIGNED(last_pmd_table)) {
		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}

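/*
 * Build the kernel mapping for memory chunk 'node'. On 68020/030 the loop
 * prefers "early termination" descriptors that point a root- or pointer-table
 * entry straight at physical memory; on 68040/060 it always fills in
 * individual page table entries. When the linear map starts at virtual
 * address 0, that first page is left invalid.
 */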
static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * Alternate definitions that are compile time constants, for
 * initializing protection_map. The cachebits are fixed later.
 */
#define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE] = PAGE_NONE_C,
	[VM_READ] = PAGE_READONLY_C,
	[VM_WRITE] = PAGE_COPY_C,
	[VM_WRITE | VM_READ] = PAGE_COPY_C,
	[VM_EXEC] = PAGE_READONLY_C,
	[VM_EXEC | VM_READ] = PAGE_READONLY_C,
	[VM_EXEC | VM_WRITE] = PAGE_COPY_C,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_C,
	[VM_SHARED] = PAGE_NONE_C,
	[VM_SHARED | VM_READ] = PAGE_READONLY_C,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED_C,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC] = PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_C
};
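/* Generate vm_get_page_prot() from the protection_map table above. */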
DECLARE_VM_GET_PAGE_PROT

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size - 1;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
				  MEMBLOCK_NONE);
		addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr) + 1;

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	early_memtest(min_addr, max_addr);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fc(USER_DATA);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}