GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/mm/kaslr.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (the physical memory
 * mapping, vmalloc and vmemmap). This security feature mitigates exploits
 * relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on PGD & P4D/PUD page table levels to increase possible addresses.
 * The physical memory mapping code was adapted to support P4D/PUD level
 * virtual addresses. On the best configuration, this implementation provides
 * about 30,000 possible virtual addresses on average for each memory region.
 * An additional low memory page is used to ensure each CPU can start with
 * a PGD aligned virtual address (for realmode).
 *
 * The order of the memory regions is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and space between each. The size of the
 * physical memory mapping is the available physical memory.
 */
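
/*
 * For orientation (illustrative values, not defined in this file; see
 * Documentation/arch/x86/x86_64/mm.rst): with 4-level paging and no
 * randomization, the three regions handled here start at
 *
 *	page_offset_base	0xffff888000000000	direct mapping of all RAM
 *	vmalloc_base		0xffffc90000000000	vmalloc/ioremap space
 *	vmemmap_base		0xffffea0000000000	virtual memory map
 *
 * kernel_randomize_memory() below shifts each base upward by a random,
 * PUD-aligned amount while preserving this order.
 */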

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/prandom.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * The end address could depend on more configuration options to make the
 * highest amount of space for randomization available, but that's too hard
 * to keep straight and caused issues already.
 */
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long *end;
	unsigned long size_tb;
} kaslr_regions[] = {
	{
		.base = &page_offset_base,
		.end = &direct_map_physmem_end,
	},
	{
		.base = &vmalloc_base,
	},
	{
		.base = &vmemmap_base,
	},
};

/*
 * The end of the physical address space that can be mapped directly by the
 * kernel. This starts out at (1 << MAX_PHYSMEM_BITS) - 1, but KASLR may
 * reduce it in order to increase the available entropy for mapping other
 * regions.
 */
unsigned long direct_map_physmem_end __ro_after_init;

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}
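
/*
 * Example (hypothetical value): a region with size_tb == 10 occupies
 * 10 << TB_SHIFT bytes, i.e. 10 TiB. Region sizes are tracked in whole
 * terabytes and only converted to bytes here.
 */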

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr_start, vaddr;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;
	unsigned long vmemmap_size;

	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
	vaddr = vaddr_start;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
	 * with the vaddr_start/vaddr_end variables. These checks are very
	 * limited.
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	/* Preset the end of the possible address space for physical memory */
	direct_map_physmem_end = ((1ULL << MAX_PHYSMEM_BITS) - 1);
	if (!kaslr_memory_enabled())
		return;

	kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
	kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
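
	/*
	 * For scale: with 4-level paging, MAX_PHYSMEM_BITS is 46, so the
	 * direct map starts out sized at 1 << (46 - 40) == 64 TiB, and
	 * VMALLOC_SIZE_TB is 32. With 5-level paging both grow
	 * substantially (52 physical bits == 4 PiB).
	 */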

	/*
	 * Size the physical memory mapping to the memory that is actually
	 * available, and add padding if needed (especially for memory
	 * hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
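
	/*
	 * Worked example (hypothetical machine): with 1.5 TiB of RAM,
	 * DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) yields 2;
	 * assuming the memory-hotplug default padding of 0xa, memory_tb
	 * becomes 12, i.e. a 12 TiB direct mapping.
	 */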

	/*
	 * Adapt physical memory region size based on available memory,
	 * except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
	 * device BAR space assuming the direct map space is large enough
	 * for creating a ZONE_DEVICE mapping in the direct map corresponding
	 * to the physical BAR address.
	 */
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
		kaslr_regions[0].size_tb = memory_tb;

	/*
	 * Calculate the vmemmap region size in TBs, aligned to a TB
	 * boundary.
	 */
	vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
			sizeof(struct page);
	kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
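
	/*
	 * Worked example: one struct page describes each 4 KiB page, so
	 * each TiB of direct map needs 1 << (40 - 12) == 2^28 struct
	 * pages. Assuming sizeof(struct page) == 64, that is 2^34 bytes,
	 * i.e. 16 GiB of vmemmap per TiB of physical address space.
	 */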

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);
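
	/*
	 * For scale (4-level paging): vaddr_start is 0xffff888000000000
	 * and vaddr_end (CPU_ENTRY_AREA_BASE) is 0xfffffe0000000000, about
	 * 117.5 TiB in total. Whatever that span leaves over after the
	 * region sizes are subtracted is the entropy spread between the
	 * three region bases below, at PUD (1 GiB) granularity; this is
	 * roughly where the 30,000 possible addresses per region cited in
	 * the header come from.
	 */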

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available. The remaining entropy budget is split evenly
		 * across the regions still to be placed, and the random
		 * offset is rounded down to a PUD boundary so each base
		 * stays PUD aligned.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/* Calculate the end of the region */
		vaddr += get_padding(&kaslr_regions[i]);
		/*
		 * KASLR trims the maximum possible size of the
		 * direct-map. Update the direct_map_physmem_end boundary.
		 * No rounding required as the region starts
		 * PUD aligned and size is in units of TB.
		 */
		if (kaslr_regions[i].end)
			*kaslr_regions[i].end = __pa_nodebug(vaddr - 1);

		/* Add a minimum padding based on randomization alignment. */
		vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}
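
/*
 * Note: page_offset_base, vmalloc_base and vmemmap_base are ordinary
 * variables consumed by __va()/__pa() and friends (see
 * CONFIG_DYNAMIC_MEMORY_LAYOUT), so once the loop above has run, later
 * address translation transparently uses the randomized bases.
 */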

void __meminit init_trampoline_kaslr(void)
{
	pud_t *pud_page_tramp, *pud, *pud_tramp;
	p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
	unsigned long paddr, vaddr;
	pgd_t *pgd;

	pud_page_tramp = alloc_low_page();

	/*
	 * There are two mappings for the low 1MB area, the direct mapping
	 * and the 1:1 mapping for the real mode trampoline:
	 *
	 * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET
	 * 1:1 mapping:    virt_addr = phys_addr
	 */
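	/*
	 * Because the randomized page_offset_base is only PUD-aligned, the
	 * direct-map PGD entry cannot simply be reused for the trampoline's
	 * 1:1 mapping. Instead, copy the kernel's live PUD entry for
	 * __va(0) into a dedicated PUD page (plus a P4D page with 5-level
	 * paging) and point trampoline_pgd_entry at it.
	 */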
	paddr = 0;
	vaddr = (unsigned long)__va(paddr);
	pgd = pgd_offset_k(vaddr);

	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);

	pud_tramp = pud_page_tramp + pud_index(paddr);
	*pud_tramp = *pud;

	if (pgtable_l5_enabled()) {
		p4d_page_tramp = alloc_low_page();

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);

		set_p4d(p4d_tramp,
			__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));

		trampoline_pgd_entry =
			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp));
	} else {
		trampoline_pgd_entry =
			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
	}
}