GitHub Repository: torvalds/linux
Path: blob/master/arch/sh/mm/init.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2011 Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 * Copyright (C) 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
#include "ioremap.h"
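
/*
 * swapper_pg_dir is the kernel's reference page directory: the pgd
 * used by the init task. It is cleared and installed into the MMU's
 * TTB register by paging_init() below.
 */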
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
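/*
 * Walk the kernel page tables for 'addr', allocating any missing
 * intermediate levels (p4d/pud/pmd) along the way, and return a
 * pointer to the pte, or NULL if the top-level pgd entry is absent.
 */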
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d)) {
		p4d_ERROR(*p4d);
		return NULL;
	}

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}
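
/*
 * Establish or tear down a single kernel mapping at 'addr'. The local
 * TLB entry for the address is flushed so the update takes effect
 * immediately, and mappings created with _PAGE_WIRED are additionally
 * pinned into the TLB via tlb_wire_entry() so they are never evicted.
 */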
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}
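
/*
 * Fixmap entries are compile-time-allocated virtual addresses; the
 * index is converted with __fix_to_virt() and bounds-checked against
 * __end_of_fixed_addresses before any pte is touched.
 */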
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}
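
/*
 * Early page table constructors: if the next level of the table is
 * missing, allocate a zeroed page for it from memblock and hook it
 * up. The BUG_ON()s check that the freshly populated entry resolves
 * back to the table that was just allocated.
 */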
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
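
/*
 * page_table_kmap_check() is a no-op on sh and simply returns the
 * pte. page_table_range_init() pre-populates the pgd/pud/pmd levels
 * covering [start, end) (used below for the fixmap window) so that
 * __set_fixmap() only ever has to write pte entries; the i/j/k
 * indices let the walk start mid-table on the first iteration and
 * from slot 0 on subsequent ones.
 */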
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif /* CONFIG_MMU */
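
/*
 * Set up the node's pg_data_t. On NUMA configurations the node data
 * is allocated here; in either case the node's spanned PFN range is
 * taken from the ranges previously registered with memblock.
 */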
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NUMA
	alloc_node_data(nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init do_init_bootmem(void)
{
	unsigned long start_pfn, end_pfn;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
		__add_active_range(0, start_pfn, end_pfn);

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	sparse_init();
}
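
/*
 * Reserve memory that the early allocator must never hand out: from
 * CONFIG_ZERO_PAGE_OFFSET above the start of RAM up through the end
 * of the kernel image, plus everything below that offset, the initrd
 * and the crash kernel region.
 */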
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (the first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}
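
/*
 * paging_init() drives the early MM bring-up: platform memory
 * registration, early memblock reservations and limits, pgdat and
 * sparsemem setup, clearing swapper_pg_dir and loading it into the
 * TTB, pre-building the fixmap page tables, and finally handing the
 * zone PFN limits to free_area_init().
 */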
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}
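
/*
 * mem_init() runs once the core VM is up: initialize the CPU caches,
 * zero the shared zero page and write it back, set up the vsyscall
 * page, print the virtual memory layout, and flag completion via
 * mem_init_done.
 */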
unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}