GitHub Repository: torvalds/linux
Path: blob/master/arch/um/kernel/mem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include <um_malloc.h>
#include <linux/sched/task.h>

#ifdef CONFIG_KASAN
int kasan_um_is_ready;
void kasan_init(void)
{
	/*
	 * kasan_map_memory will map all of the required address space and
	 * the host machine will allocate physical memory as necessary.
	 */
	kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
	init_task.kasan_depth = 0;
	kasan_um_is_ready = true;
}

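/*
 * kasan_init() must run before any KASAN-instrumented code executes, so
 * a pointer to it is stashed in the dedicated ".kasan_init" section,
 * from which the (uninstrumented) early startup path picks it up and
 * calls it.
 */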
static void (*kasan_init_ptr)(void)
__section(".kasan_init") __used
= kasan_init;
#endif

/* allocated in paging_init, zeroed in arch_mm_preinit, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and read-only for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and read-only after that */
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

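/*
 * Called early in boot, before the buddy allocator is up: map the gap
 * between the end of the kernel's brk heap and uml_reserved, then hand
 * that region back to memblock so it can be allocated from.
 */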
void __init arch_mm_preinit(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free((void *)brk_end, uml_reserved - brk_end);
	uml_reserved = brk_end;
	min_low_pfn = PFN_UP(__pa(uml_reserved));
	max_pfn = max_low_pfn;
}

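/*
 * By the time mem_init() runs, the core mm has handed boot memory over
 * to the page allocator, so flag that kmalloc() is safe to use; early
 * callers check kmalloc_ok before allocating.
 */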
void __init mem_init(void)
{
	kmalloc_ok = 1;
}

#if IS_ENABLED(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA)
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}
}

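/*
 * The same pattern one level up: populate a missing pmd (or pud) table
 * and hook it into the level above.  These become no-ops when the
 * configuration has fewer page-table levels.
 */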
static void __init one_md_table_init(pud_t *pud)
{
#if CONFIG_PGTABLE_LEVELS > 2
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}

static void __init one_ud_table_init(p4d_t *p4d)
{
#if CONFIG_PGTABLE_LEVELS > 3
	pud_t *pud_table = (pud_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pud_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_p4d(p4d, __p4d(_KERNPG_TABLE + (unsigned long) __pa(pud_table)));
	BUG_ON(pud_table != pud_offset(p4d, 0));
#endif
}

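/*
 * Walk the page-table hierarchy for [start, end) and allocate any
 * missing intermediate tables, so the leaf ptes in that range can be
 * filled in directly afterwards.
 */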
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		if (p4d_none(*p4d))
			one_ud_table_init(p4d);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}

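/*
 * Copy the host's vsyscall pages into memory UML actually owns and map
 * the copy read-only at the original addresses, so userspace keeps a
 * working vsyscall area backed by UML-managed physical memory.
 */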
static void __init fixaddr_user_init(void)
{
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
			  p += PAGE_SIZE) {
		pte = virt_to_kpte(vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
}
#endif

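/*
 * Set up the initial memory layout: allocate the shared zero page and
 * tell the core mm how big ZONE_NORMAL is (everything up to end_iomem).
 */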
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);

#if IS_ENABLED(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA)
	fixaddr_user_init();
#endif
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

/* Allocate and free page tables. */

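/*
 * A fresh pgd starts with the kernel half copied from swapper_pg_dir,
 * so every address space shares the same kernel mappings.
 */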
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = __pgd_alloc(mm, 0);

	if (pgd)
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	return pgd;
}

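/*
 * Thin wrapper so os-side (host userspace) code, which cannot include
 * kernel headers, can still allocate kernel memory.
 */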
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}

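/*
 * Translate VM_{READ,WRITE,EXEC,SHARED} combinations into page
 * protections.  Private writable mappings get PAGE_COPY so they fault
 * for copy-on-write; DECLARE_VM_GET_PAGE_PROT below generates
 * vm_get_page_prot() from this table.
 */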
static const pgprot_t protection_map[16] = {
	[VM_NONE] = PAGE_NONE,
	[VM_READ] = PAGE_READONLY,
	[VM_WRITE] = PAGE_COPY,
	[VM_WRITE | VM_READ] = PAGE_COPY,
	[VM_EXEC] = PAGE_READONLY,
	[VM_EXEC | VM_READ] = PAGE_READONLY,
	[VM_EXEC | VM_WRITE] = PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
	[VM_SHARED] = PAGE_NONE,
	[VM_SHARED | VM_READ] = PAGE_READONLY,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
	[VM_SHARED | VM_EXEC] = PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT

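/*
 * Drop write permission on the kernel's rodata section.  On UML this is
 * done with a host mprotect() via os_protect_memory().
 */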
void mark_rodata_ro(void)
{
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long rodata_end = PFN_ALIGN(__end_rodata);

	os_protect_memory((void *)rodata_start, rodata_end - rodata_start, 1, 0, 0);
}