GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/mm/memblock.c
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>

/* Check for already reserved areas */
bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
{
	struct memblock_region *r;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	bool changed = false;

again:
	last = addr + size;
	for_each_memblock(reserved, r) {
		if (last > r->base && addr < r->base) {
			size = r->base - addr;
			changed = true;
			goto again;
		}
		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
			addr = round_up(r->base + r->size, align);
			size = last - addr;
			changed = true;
			goto again;
		}
		if (last <= (r->base + r->size) && addr >= r->base) {
			*sizep = 0;
			return false;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}

/*
 * Find the next free range after @start; the size found is returned in *sizep.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		u64 ei_start = r->base;
		u64 ei_last = ei_start + r->size;
		u64 addr;

		addr = round_up(ei_start, align);
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		*sizep = ei_last - addr;
		while (memblock_x86_check_reserved_size(&addr, sizep, align))
			;

		if (*sizep)
			return addr;
	}

	return MEMBLOCK_ERROR;
}

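/* Allocate a temporary, zeroed array of 'count' struct range entries */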
static __init struct range *find_range_array(int count)
{
	u64 end, size, mem;
	struct range *range;

	size = sizeof(struct range) * count;
	end = memblock.current_limit;

	mem = memblock_find_in_range(0, end, size, sizeof(struct range));
	if (mem == MEMBLOCK_ERROR)
		panic("can not find more space for range array");

	/*
	 * This range is temporary, so don't reserve it; it will not be
	 * overlapped because we will not allocate a new buffer before
	 * we discard this one.
	 */
	range = __va(mem);
	memset(range, 0, size);

	return range;
}

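/* Remove every memblock reserved region from the given range array */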
static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
	u64 final_start, final_end;
	struct memblock_region *r;

	/* Take out the region array itself first */
	memblock_free_reserved_regions();

	memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);

	for_each_memblock(reserved, r) {
		memblock_dbg(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		subtract_range(range, az, final_start, final_end);
	}

	/* Put the region array back */
	memblock_reserve_reserved_regions();
}

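/* Helpers to count the active ranges registered for a node in early_node_map[] */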
struct count_data {
	int nr;
};

static int __init count_work_fn(unsigned long start_pfn,
				unsigned long end_pfn, void *datax)
{
	struct count_data *data = datax;

	data->nr++;

	return 0;
}

static int __init count_early_node_map(int nodeid)
{
	struct count_data data;

	data.nr = 0;
	work_with_active_regions(nodeid, count_work_fn, &data);

	return data.nr;
}

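/*
 * Build an array of the free (not reserved) ranges within [start_pfn, end_pfn)
 * for a node.  Returns the number of ranges and stores the array in *rangep.
 */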
int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
			unsigned long start_pfn, unsigned long end_pfn)
{
	int count;
	struct range *range;
	int nr_range;

	count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;

	range = find_range_array(count);
	nr_range = 0;

	/*
	 * Use early_node_map[] and memblock.reserved.region to get the range
	 * array first
	 */
	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
	subtract_range(range, count, 0, start_pfn);
	subtract_range(range, count, end_pfn, -1ULL);

	memblock_x86_subtract_reserved(range, count);
	nr_range = clean_sort_range(range, count);

	*rangep = range;
	return nr_range;
}

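/* As above, but covering the whole node (lowmem only on 32-bit) */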
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
	unsigned long end_pfn = -1UL;

#ifdef CONFIG_X86_32
	end_pfn = max_low_pfn;
#endif
	return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
}

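/*
 * Sum the memory between addr and limit, in bytes.  When get_free is true,
 * reserved regions are subtracted first, so only free memory is counted.
 */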
static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
{
	int i, count;
	struct range *range;
	int nr_range;
	u64 final_start, final_end;
	u64 free_size;
	struct memblock_region *r;

	count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;

	range = find_range_array(count);
	nr_range = 0;

	addr = PFN_UP(addr);
	limit = PFN_DOWN(limit);

	for_each_memblock(memory, r) {
		final_start = PFN_UP(r->base);
		final_end = PFN_DOWN(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		nr_range = add_range(range, count, nr_range, final_start, final_end);
	}
	subtract_range(range, count, 0, addr);
	subtract_range(range, count, limit, -1ULL);

	/* Subtract memblock.reserved.region in range? */
	if (!get_free)
		goto sort_and_count_them;
	for_each_memblock(reserved, r) {
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		subtract_range(range, count, final_start, final_end);
	}

sort_and_count_them:
	nr_range = clean_sort_range(range, count);

	free_size = 0;
	for (i = 0; i < nr_range; i++)
		free_size += range[i].end - range[i].start;

	return free_size << PAGE_SHIFT;
}

u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, true);
}

u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, false);
}

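/* Reserve the range [start, end) and log it with the given name */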
void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);

	memblock_reserve(start, end - start);
}

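/* Return the range [start, end) to the free memory pool */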
void __init memblock_x86_free_range(u64 start, u64 end)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);

	memblock_free(start, end - start);
}

/*
 * This must be called after memblock_x86_register_active_regions(),
 * so that early_node_map[] is already filled in.
 */
u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
{
	u64 addr;
	addr = find_memory_core_early(nid, size, align, start, end);
	if (addr != MEMBLOCK_ERROR)
		return addr;

	/* Fallback: start and end should already be within the node's range */
	return memblock_find_in_range(start, end, size, align);
}

/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
 */
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	return 1;
}

/* Walk the memblock.memory map and register active regions within a node */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
						    &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
}

/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
						    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;

	return end - start - ((u64)ram << PAGE_SHIFT);
}