// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */

#include <linux/pagemap.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/memblock.h>	/* max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/gfp.h>

#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/sections.h>

#include "../kernel/proto.h"

static struct pcb_struct original_pcb;

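/*
 * Allocate a page directory for a new mm.  Every user PGD must also
 * carry the kernel's mappings: we copy the kernel entries from
 * init_mm (one entry normally, several under CONFIG_ALPHA_LARGE_VMALLOC)
 * and then install the VPTB self-map in the final slot, so the
 * virtual page table works for this address space as well.
 */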
pgd_t *
pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = __pgd_alloc(mm, 0);
	init = pgd_offset(&init_mm, 0UL);
	if (ret) {
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

		/* The last PGD entry is the VPTB self-map. */
		pgd_val(ret[PTRS_PER_PGD-1])
		  = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
	}
	return ret;
}


/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory.  Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused, etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pmd_t *
__bad_pagetable(void)
{
	memset(absolute_pointer(EMPTY_PGT), 0, PAGE_SIZE);
	return (pmd_t *) EMPTY_PGT;
}

pte_t
__bad_page(void)
{
	memset(absolute_pointer(EMPTY_PGE), 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}

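/*
 * Snapshot the current stack pointer ($30 is the Alpha stack pointer
 * register) into the PCB, then swap the new PCB in via PALcode.  The
 * value returned is the address of the previously active PCB.
 */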
static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}

/* Set up initial PCB, VPTB, and other such niceties.  */

static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

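	/*
	 * With 8KB pages and 8-byte PTEs, each of the 1024 L1 slots maps
	 * 8GB of address space, and the VPTB address 0xfffffffe00000000
	 * indexes L1 slot 1023.  The Alpha PTE keeps the PFN in its high
	 * 32 bits, hence the << 32 below.
	 */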
	/* Initialize the kernel's page tables.  Linux puts the vptb in
	   the last slot of the L1 page table. */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

	/* Set the vptb.  This is often done by the bootloader, but
	   shouldn't be required. */
	if (hwrpb->vptb != 0xfffffffe00000000UL) {
		wrvptptr(0xfffffffe00000000UL);
		hwrpb->vptb = 0xfffffffe00000000UL;
		hwrpb_update_checksum(hwrpb);
	}

	/* Also set up the real kernel PCB while we're at it.  */
	init_thread_info.pcb.ptbr = newptbr;
	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
	tbia();

	/* Save off the contents of the original PCB so that we can
	   restore the original console's page tables for a clean reboot.

	   Note that the PCB is supposed to be a physical address, but
	   since KSEG values also happen to work, folks get confused.
	   Check this here. */

	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}

int callback_init_done;

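/*
 * Remap the SRM console callbacks into the kernel's address space and
 * switch from the boot page tables to the kernel's own.  Page-table
 * pages are taken from memory just past kernel_end, so the (possibly
 * bumped) end address is returned to the caller.
 */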
void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	void *two_pages;

	/* Starting at the HWRPB, locate the CRB. */
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

	if (alpha_using_srm) {
		/* Tell the console whither it is to be remapped. */
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();	/* "We're boned."  --Bender */

		/* Edit the procedure descriptors for DISPATCH and FIXUP. */
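		/* Each new VA keeps its offset from the start of the
		   console's first mapping (crb->map[0].va), rebased onto
		   VMALLOC_START. */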
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
	   these to actually remap the console.  There is an assumption
	   here that only one of each is needed, and this allows for 8MB.
	   On systems with larger consoles, additional pages will be
	   allocated as needed during the mapping process.

	   When SRM is not in use but CONFIG_ALPHA_LARGE_VMALLOC is not
	   set either, we still need to allocate the PGD we use for
	   vmalloc before we start forking other tasks. */

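	/* Round kernel_end up to the next page boundary (~PAGE_MASK is
	   PAGE_SIZE - 1) and claim two zeroed pages there: one becomes
	   the PMD page, the other the first PTE page. */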
	two_pages = (void *)
	  (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);

	pgd = pgd_offset_k(VMALLOC_START);
	p4d = p4d_offset(pgd, VMALLOC_START);
	pud = pud_offset(p4d, VMALLOC_START);
	pud_set(pud, (pmd_t *)two_pages);
	pmd = pmd_offset(pud, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long nr_pages = 0;
		unsigned long vaddr;
		unsigned long i, j;

		/* calculate needed size */
		for (i = 0; i < crb->map_entries; ++i)
			nr_pages += crb->map[i].count;

		/* register the vm area */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.size = nr_pages << PAGE_SHIFT;
		vm_area_register_early(&console_remap_vm, PAGE_SIZE);

		vaddr = (unsigned long)console_remap_vm.addr;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries. */
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
				/* Newer consoles (especially on larger
				   systems) may require more pages of
				   PTEs.  Grab additional pages as needed. */
				if (pmd != pmd_offset(pud, vaddr)) {
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pud, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset_kernel(pmd, vaddr),
					pfn_pte(pfn, PAGE_KERNEL));
				pfn++;
				vaddr += PAGE_SIZE;
			}
		}
	}

	callback_init_done = 1;
	return kernel_end;
}

/*
 * paging_init() sets up the memory map.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
	unsigned long dma_pfn;

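	/* ZONE_DMA covers everything the legacy DMA controllers can
	   address (below MAX_DMA_ADDRESS); all remaining memory is
	   ZONE_NORMAL. */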
	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	max_zone_pfn[ZONE_DMA] = dma_pfn;
	max_zone_pfn[ZONE_NORMAL] = max_pfn;

	/* Initialize mem_map[]. */
	free_area_init(max_zone_pfn);

	/* Initialize the kernel's ZERO_PGE. */
	memset(absolute_pointer(ZERO_PGE), 0, PAGE_SIZE);
}

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
srm_paging_stop(void)
{
	/* Move the vptb back to where the SRM console expects it. */
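	/* Each L1 slot maps 8GB, so slot 1 is the one that backs a VPTB
	   of 0x200000000; copying the self-map entry there keeps the
	   virtual page table walkable at the console's address. */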
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000UL);
	hwrpb->vptb = 0x200000000UL;
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use. */
	load_PCB(&original_pcb);
	tbia();
}
#endif

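/*
 * Alpha PTEs express permissions with fault-on bits rather than
 * enable bits: _PAGE_FOR, _PAGE_FOW and _PAGE_FOE force a fault on
 * read, write and execute respectively, so each entry below lists
 * the access types that are *not* allowed.  _PAGE_P is used for
 * private (copy-on-write) mappings, _PAGE_S for shared ones.
 */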
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= _PAGE_P(_PAGE_FOE | _PAGE_FOW |
								  _PAGE_FOR),
	[VM_READ]					= _PAGE_P(_PAGE_FOE | _PAGE_FOW),
	[VM_WRITE]					= _PAGE_P(_PAGE_FOE),
	[VM_WRITE | VM_READ]				= _PAGE_P(_PAGE_FOE),
	[VM_EXEC]					= _PAGE_P(_PAGE_FOW | _PAGE_FOR),
	[VM_EXEC | VM_READ]				= _PAGE_P(_PAGE_FOW),
	[VM_EXEC | VM_WRITE]				= _PAGE_P(0),
	[VM_EXEC | VM_WRITE | VM_READ]			= _PAGE_P(0),
	[VM_SHARED]					= _PAGE_S(_PAGE_FOE | _PAGE_FOW |
								  _PAGE_FOR),
	[VM_SHARED | VM_READ]				= _PAGE_S(_PAGE_FOE | _PAGE_FOW),
	[VM_SHARED | VM_WRITE]				= _PAGE_S(_PAGE_FOE),
	[VM_SHARED | VM_WRITE | VM_READ]		= _PAGE_S(_PAGE_FOE),
	[VM_SHARED | VM_EXEC]				= _PAGE_S(_PAGE_FOW | _PAGE_FOR),
	[VM_SHARED | VM_EXEC | VM_READ]			= _PAGE_S(_PAGE_FOW),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= _PAGE_S(0),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= _PAGE_S(0)
};
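/* Generates vm_get_page_prot(), which resolves a vma's VM_* flags
   through the table above. */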
DECLARE_VM_GET_PAGE_PROT