GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/mm/init_32.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/cpu_entry_area.h>
#include <asm/init.h>
#include <asm/pgtable_areas.h>
#include <asm/numa.h>

#include "mm_internal.h"
unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *)alloc_low_page();
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}
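/*
 * Note: on 32-bit x86 the p4d and pud levels are always folded, so the
 * p4d_offset()/pud_offset() calls above are no-ops that merely recast
 * the pgd entry. Without PAE the pmd level is folded too, and the
 * "middle" table returned here is really the page directory itself.
 */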
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *)alloc_low_page();

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}
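/*
 * Illustrative use (not from this file): an early caller that wants a
 * boot-time kernel mapping at vaddr could do
 *
 *	pte_t *pte = populate_extra_pte(vaddr);
 *	set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
 *
 * allocating any missing pmd/pte pages on the way down. Both helpers
 * operate on swapper_pg_dir and are __init-only.
 */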
static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
	unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
		return 0;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd_idx++) {
			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
				count++;
			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
#endif
	return count;
}
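/*
 * The count returned above is the number of pte pages that
 * page_table_range_init() must pre-allocate in one contiguous block
 * (via alloc_low_pages()) so that page_table_kmap_check() can keep the
 * kmap pte pages physically linear even when the early fixmap already
 * installed a pte page in that range.
 */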
static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte,
					   void **adr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = *adr;
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);
		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, creating page tables wherever they
 * are missing in the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;
	unsigned long count = page_table_range_init_count(start, end);
	void *adr = NULL;

	if (count)
		adr = alloc_low_pages(count);

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte, &adr);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
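/*
 * In this file the two callers are early_ioremap_page_table_range_init()
 * (which covers the fixmap area) and, under CONFIG_HIGHMEM,
 * permanent_kmaps_init() (which covers the pkmap window).
 */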
static inline int is_x86_32_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask,
			     pgprot_t prot)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * First iteration will setup identity mapping using large/small pages
	 * based on use_pse, with other attributes same as set by
	 * the early code in head_32.S.
	 *
	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
	 * as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!boot_cpu_has(X86_FEATURE_PSE))
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				pfn &= PMD_MASK >> PAGE_SHIFT;
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_x86_32_kernel_text(addr) ||
				    is_x86_32_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_x86_32_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * update direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * local global flush tlb, which will flush the previous
		 * mappings present in both small and large page TLB's.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}
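/*
 * Note: despite the pages_2m/PG_LEVEL_2M naming, a PSE "large page" on
 * 32-bit x86 is 4 MiB without PAE (PTRS_PER_PTE == 1024) and 2 MiB with
 * PAE (PTRS_PER_PTE == 512); pfn += PTRS_PER_PTE advances by exactly
 * one large page either way, and pfn &= PMD_MASK >> PAGE_SHIFT aligns
 * the frame down to that large-page boundary first.
 */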
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr = PKMAP_BASE;

	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pkmap_page_table = virt_to_kpte(vaddr);
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init sync_initial_page_table(void)
{
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * sync back low identity map too. It is used for example
	 * in the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
}
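/*
 * initial_page_table is the boot-time page directory set up by
 * head_32.S; swapper_pg_dir is the one the kernel keeps current.
 * Copying the kernel and low-identity ranges keeps the two in sync
 * for early users such as the 32-bit EFI stub mentioned above.
 */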
void __init native_pagetable_init(void)
{
	unsigned long pfn, va;
	pgd_t *pgd, *base = swapper_pg_dir;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table.
	 * In virtual address space, we should have at least two pages
	 * from VMALLOC_END to pkmap or fixmap according to the VMALLOC_END
	 * definition, and max_low_pfn is set from the VMALLOC_END physical
	 * address. If the initial memory mapping did its job, we should
	 * find ptes in use near max_low_pfn, or a pmd that is not present.
	 */
	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		p4d = p4d_offset(pgd, va);
		pud = pud_offset(p4d, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		/* should not be large page here */
		if (pmd_leaf(*pmd)) {
			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
				pfn, pmd, __pa(pmd));
			BUG_ON(1);
		}

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
			pfn, pmd, __pa(pmd), pte, __pa(pte));
		pte_clear(NULL, va, pte);
	}
	paging_init();
}
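/*
 * native_pagetable_init() is the bare-metal x86_init.paging.pagetable_init
 * hook; paravirt environments may install their own version, which is why
 * the trimming above is done here rather than in paging_init() itself.
 */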
/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * This will be a pagetable constructed in arch/x86/kernel/head_32.S.
 * The root of the pagetable will be swapper_pg_dir.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);
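/*
 * Example (illustrative): booting with "highmem=512M" forces exactly
 * 512 MiB of highmem; memparse() accepts the usual K/M/G suffixes, and
 * the result is converted to pages via PAGE_SHIFT.
 */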
#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if the user wants highmem
 * artificially, via the highmem=x boot parameter, then create it:
 */
static void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}
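/*
 * Worked example: on a 512 MiB box, "highmem=256M" moves 256 MiB to
 * highmem; "highmem=480M" would leave only 32 MiB of lowmem, below the
 * 64*1024*1024/PAGE_SIZE floor above, so the request is logged and
 * ignored.
 */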
#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Support for CONFIG_HIGHMEM64G was removed!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM */
}
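/*
 * Note: MAXMEM_PFN is the highest pfn the kernel can map directly.
 * With the default 3G/1G split (PAGE_OFFSET = 0xC0000000) and the
 * vmalloc reserve carved out of the top 1 GiB, lowmem tops out around
 * 896 MB; everything above that must go through highmem (kmap).
 */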
/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);

	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	char z = 0;

	printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

	__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

	if (copy_to_kernel_nofault((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
		clear_fixmap(FIX_WP_TEST);
		printk(KERN_CONT "Ok.\n");
		return;
	}

	printk(KERN_CONT "No.\n");
	panic("Linux doesn't support CPUs with broken WP.");
}
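/*
 * copy_to_kernel_nofault() returns an error when the write faults, so a
 * nonzero return above means the CPU raised a fault on a read-only
 * kernel mapping, i.e. it honours WP in supervisor mode.
 */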
void __init arch_mm_preinit(void)
{
	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
}

void __init mem_init(void)
{
	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	test_wp_bit();
}
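/*
 * The temporary #defines above let the BUILD_BUG_ON() checks evaluate
 * with compile-time constants: __FIXADDR_TOP is pinned to its highest
 * possible value and high_memory to a conservative -128MB, so any
 * layout that could overlap is rejected at build time; the runtime
 * BUG_ON()s then re-check with the real values.
 */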
int kernel_set_to_readonly __read_mostly;

static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This matches the upper limit of is_x86_32_kernel_text(), rounded
	 * up to a huge page where large pages are used:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_memory_nx(start, size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = (unsigned long)__end_rodata - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	pr_info("Write protecting kernel text and read-only data: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	pr_info("Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
}