Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/head32.c
26424 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* linux/arch/i386/kernel/head32.c -- prepare to run common code
4
*
5
* Copyright (C) 2000 Andrea Arcangeli <[email protected]> SuSE
6
* Copyright (C) 2007 Eric Biederman <[email protected]>
7
*/
8
9
#include <linux/init.h>
10
#include <linux/start_kernel.h>
11
#include <linux/mm.h>
12
#include <linux/memblock.h>
13
14
#include <asm/desc.h>
15
#include <asm/setup.h>
16
#include <asm/sections.h>
17
#include <asm/e820/api.h>
18
#include <asm/page.h>
19
#include <asm/apic.h>
20
#include <asm/io_apic.h>
21
#include <asm/bios_ebda.h>
22
#include <asm/microcode.h>
23
#include <asm/tlbflush.h>
24
#include <asm/bootparam_utils.h>
25
26
/*
 * Install the 32-bit default platform hooks into x86_init.  Used for
 * any hardware_subarch that does not provide its own early setup.
 */
static void __init i386_default_early_setup(void)
{
	/* Wire up the 32-bit specific flavours of the setup callbacks */
	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
	x86_init.resources.reserve_resources = i386_reserve_resources;
}
32
33
#ifdef CONFIG_MICROCODE_INITRD32
/* Early virtual start address of the initrd, recorded by mk_early_pgtbl_32() */
unsigned long __initdata initrd_start_early;
/* Range of second-level entries that temporarily map the initrd */
static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end;

/*
 * Tear down the temporary initrd mapping that mk_early_pgtbl_32() set up
 * so the BSP microcode loader could reach the ramdisk.  Clears every
 * second-level entry in [initrd_pl2p_start, initrd_pl2p_end).
 */
static void zap_early_initrd_mapping(void)
{
	pte_t *pl2p = initrd_pl2p_start;

	for (; pl2p < initrd_pl2p_end; pl2p++) {
		*pl2p = (pte_t){ .pte = 0 };

		/*
		 * Without PAE the PAGE_OFFSET (kernel) half mirrors the
		 * identity entries, so the aliased slot is zapped too.
		 */
		if (!IS_ENABLED(CONFIG_X86_PAE))
			*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0};
	}
}
#else
static inline void zap_early_initrd_mapping(void) { }
#endif
51
52
/*
 * 32-bit C entry point of early boot: performs the i386-specific early
 * setup and then hands control to the generic start_kernel().  Never
 * returns.  NOTE(review): presumably reached from the early assembly
 * startup code — confirm against head_32.S.
 */
asmlinkage __visible void __init __noreturn i386_start_kernel(void)
{
	/* Make sure IDT is set up before any exception happens */
	idt_setup_early_handler();

	/* Load microcode first; zapping removes its early initrd mapping */
	load_ucode_bsp();
	zap_early_initrd_mapping();

	cr4_init_shadow();

	sanitize_boot_params(&boot_params);

	x86_early_init_platform_quirks();

	/* Call the subarch specific early setup function */
	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	case X86_SUBARCH_CE4100:
		x86_ce4100_early_setup();
		break;
	default:
		i386_default_early_setup();
		break;
	}

	start_kernel();
}
81
82
/*
 * Initialize page tables. This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base. The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 *
 * In PAE mode initial_page_table is statically defined to contain
 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
 * entries). The identity mapping is handled by pointing two PGD entries
 * to the first kernel PMD. Note the upper half of each PMD or PTE are
 * always zero at this stage.
 */
95
#ifdef CONFIG_X86_PAE
/*
 * pl2_t abstracts the second page-table level: with PAE it is the PMD
 * (initial_pg_pmd), without PAE the PGD itself (initial_page_table).
 * SET_PL2() builds a matching entry initializer for either layout.
 */
typedef pmd_t pl2_t;
#define pl2_base initial_pg_pmd
#define SET_PL2(val) { .pmd = (val), }
#else
typedef pgd_t pl2_t;
#define pl2_base initial_page_table
#define SET_PL2(val) { .pgd = (val), }
#endif
104
105
/*
 * Populate page tables starting from @pte until the mapped physical
 * address reaches @limit.  For every second-level slot one full page
 * table (PTRS_PER_PTE entries) is filled from *@ptep; without PAE the
 * same entry is also written to the PAGE_OFFSET (kernel) half of the
 * page directory.  @ptep and @pl2p are advanced in place.
 *
 * Returns the first pte value NOT written, so a caller can continue
 * mapping from where this invocation stopped.
 */
static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t **pl2p,
						  const unsigned long limit)
{
	while ((pte.pte & PTE_PFN_MASK) < limit) {
		/* Point the directory entry at the current page table */
		pl2_t pl2 = SET_PL2((unsigned long)*ptep | PDE_IDENT_ATTR);
		int i;

		**pl2p = pl2;
		if (!IS_ENABLED(CONFIG_X86_PAE)) {
			/* Kernel PDE entry */
			*(*pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
		}

		/* Fill the whole page table with consecutive pages */
		for (i = 0; i < PTRS_PER_PTE; i++) {
			**ptep = pte;
			pte.pte += PAGE_SIZE;
			(*ptep)++;
		}
		(*pl2p)++;
	}
	return pte;
}
127
128
/*
 * Build the early page tables (identity + PAGE_OFFSET linear map up to
 * _end plus room for the lowmem page tables), and additionally map the
 * initrd when the 32-bit microcode loader needs it.  All globals are
 * written through their physical addresses (__pa_nodebug), which
 * indicates this runs before paging is enabled.
 */
void __init __no_stack_protector mk_early_pgtbl_32(void)
{
	/* Enough space to fit pagetables for the low memory linear map */
	unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
	pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base);
	struct boot_params __maybe_unused *params;
	pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base);
	unsigned long *ptr;

	/* Map physical 0..limit starting from physical address 0 */
	pte.pte = PTE_IDENT_ATTR;
	pte = init_map(pte, &ptep, &pl2p, limit);

	ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped);
	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
	*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;

	/* First "safe" location: just past the page tables consumed above */
	ptr = (unsigned long *)__pa_nodebug(&_brk_end);
	*ptr = (unsigned long)ptep + PAGE_OFFSET;

#ifdef CONFIG_MICROCODE_INITRD32
	params = (struct boot_params *)__pa_nodebug(&boot_params);
	if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
		return;

	/* Save the virtual start address */
	ptr = (unsigned long *)__pa_nodebug(&initrd_start_early);
	*ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET;
	/* Keep the initrd's sub-page offset within its first mapped page */
	*ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK;

	/* Save PLP2 for cleanup */
	ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start);
	*ptr = (unsigned long)pl2p + PAGE_OFFSET;

	/* Continue mapping from the page-aligned initrd start to its end */
	limit = (unsigned long)params->hdr.ramdisk_image;
	pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit);
	limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size;

	init_map(pte, &ptep, &pl2p, limit);

	ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end);
	*ptr = (unsigned long)pl2p + PAGE_OFFSET;
#endif
}
171
172