GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/boot/physmem_info.c
// SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "physmem: " fmt
#include <linux/processor.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/physmem_info.h>
#include <asm/stacktrace.h>
#include <asm/boot_data.h>
#include <asm/sparsemem.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sclp.h>
#include <asm/asm.h>
#include <asm/uv.h>
#include "decompressor.h"
#include "boot.h"

struct physmem_info __bootdata(physmem_info);
static unsigned int physmem_alloc_ranges;
static unsigned long physmem_alloc_pos;

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX \
	(256 * (1020 / 2) * sizeof(struct physmem_range))

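/*
 * Return a pointer to the n'th online range. The first MEM_INLINED_ENTRIES
 * entries are stored directly in physmem_info; additional entries live in
 * an extended array that is allocated on first use.
 */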
static struct physmem_range *__get_physmem_range_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &physmem_info.online[n];
	if (unlikely(!physmem_info.online_extended)) {
		physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
			RR_MEM_DETECT_EXT, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
			physmem_alloc_pos, true);
	}
	return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_physmem_online_range with adjacent memory ranges
 * are merged together into a single memory range.
 */
void add_physmem_online_range(u64 start, u64 end)
{
	struct physmem_range *range;

	if (physmem_info.range_count) {
		range = __get_physmem_range_ptr(physmem_info.range_count - 1);
		if (range->end == start) {
			range->end = end;
			return;
		}
	}

	range = __get_physmem_range_ptr(physmem_info.range_count);
	range->start = start;
	range->end = end;
	physmem_info.range_count++;
}

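/*
 * Issue diagnose 0x260 with subcode 0x10 ("storage configuration") to store
 * the storage extent list into the buffer described by rx1/rx2. Returns the
 * value left in ry on condition code 0, or -1 on failure/exception.
 */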
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	union register_pair rx;
	int cc, exception;
	unsigned long ry;

	rx.even = rx1;
	rx.odd = rx2;
	ry = 0x10; /* storage configuration */
	exception = 1;
	asm_inline volatile(
		" diag %[rx],%[ry],0x260\n"
		"0: lhi %[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [exc] "+d" (exception), [ry] "+d" (ry)
		: [rx] "d" (rx.pair)
		: CC_CLOBBER_LIST("memory"));
	cc = exception ? -1 : CC_TRANSFORM(cc);
	return cc == 0 ? ry : -1;
}

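/*
 * Detect online memory via diagnose 0x260. Each reported storage extent is
 * added as an online range; the reported end address is inclusive, hence
 * the "+ 1" below.
 */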
static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

#define DIAG500_SC_STOR_LIMIT 4

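/*
 * Query the storage limit via diagnose 0x500, subcode DIAG500_SC_STOR_LIMIT.
 * On success *max_physmem_end is set to the exclusive end of physical memory
 * (the reported limit is an inclusive end); returns -EINVAL if no limit is
 * reported.
 */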
static int diag500_storage_limit(unsigned long *max_physmem_end)
{
	unsigned long storage_limit;

	asm_inline volatile(
		" lghi %%r1,%[subcode]\n"
		" lghi %%r2,0\n"
		" diag %%r2,%%r4,0x500\n"
		"0: lgr %[slimit],%%r2\n"
		EX_TABLE(0b, 0b)
		: [slimit] "=d" (storage_limit)
		: [subcode] "i" (DIAG500_SC_STOR_LIMIT)
		: "memory", "1", "2");
	if (!storage_limit)
		return -EINVAL;
	/* Convert inclusive end to exclusive end */
	*max_physmem_end = storage_limit + 1;
	return 0;
}

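/*
 * Probe whether the storage at addr is accessible using the TEST PROTECTION
 * instruction. Returns the resulting condition code, or -EFAULT if the
 * access raised an exception.
 */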
static int tprot(unsigned long addr)
{
	int cc, exception;

	exception = 1;
	asm_inline volatile(
		" tprot 0(%[addr]),0\n"
		"0: lhi %[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [exc] "+d" (exception)
		: [addr] "a" (addr)
		: CC_CLOBBER_LIST("memory"));
	cc = exception ? -EFAULT : CC_TRANSFORM(cc);
	return cc;
}

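/*
 * Binary search for the end of memory: probe 1 MB blocks with tprot() and
 * keep the highest accessible offset. Returns the exclusive end address.
 */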
static unsigned long search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}
	return (offset + 1) << 20;
}

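/*
 * Determine the maximum physical memory end. Preference order: diagnose
 * 0x500 storage limit, then SCLP read info, then binary search via tprot().
 * The chosen method is recorded in physmem_info.info_source.
 */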
unsigned long detect_max_physmem_end(void)
{
	unsigned long max_physmem_end = 0;

	if (!diag500_storage_limit(&max_physmem_end)) {
		physmem_info.info_source = MEM_DETECT_DIAG500_STOR_LIMIT;
	} else if (!sclp_early_get_memsize(&max_physmem_end)) {
		physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
	} else {
		max_physmem_end = search_mem_end();
		physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
	}
	boot_debug("Max physical memory: 0x%016lx (info source: %s)\n", max_physmem_end,
		   get_physmem_info_source());
	return max_physmem_end;
}

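/*
 * Populate the list of online memory ranges. Preference order: SCLP storage
 * info, SCLP memsize (when the limit came from diagnose 0x500), diagnose
 * 0x260, and finally a single range [0, max_physmem_end) as fallback.
 */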
void detect_physmem_online_ranges(unsigned long max_physmem_end)
{
	unsigned long start, end;
	int i;

	if (!sclp_early_read_storage_info()) {
		physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
	} else if (physmem_info.info_source == MEM_DETECT_DIAG500_STOR_LIMIT) {
		unsigned long online_end;

		if (!sclp_early_get_memsize(&online_end)) {
			physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
			add_physmem_online_range(0, online_end);
		}
	} else if (!diag260()) {
		physmem_info.info_source = MEM_DETECT_DIAG260;
	} else if (max_physmem_end) {
		add_physmem_online_range(0, max_physmem_end);
	}
	boot_debug("Online memory ranges (info source: %s):\n", get_physmem_info_source());
	for_each_physmem_online_range(i, &start, &end)
		boot_debug(" online [%d]: 0x%016lx-0x%016lx\n", i, start, end);
}

void physmem_set_usable_limit(unsigned long limit)
{
	physmem_info.usable = limit;
	physmem_alloc_pos = limit;
	boot_debug("Usable memory limit: 0x%016lx\n", limit);
}

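/*
 * Report a fatal out-of-memory condition during boot: print the failed
 * allocation request, all reserved and usable online memory ranges and a
 * stack trace, then halt the machine.
 */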
static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
{
	unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
	struct reserved_range *range;
	enum reserved_range_type t;
	int i;

	boot_emerg("Linux version %s\n", kernel_version);
	if (!is_prot_virt_guest() && early_command_line[0])
		boot_emerg("Kernel command line: %s\n", early_command_line);
	boot_emerg("Out of memory allocating %lu bytes 0x%lx aligned in range %lx:%lx\n",
		   size, align, min, max);
	boot_emerg("Reserved memory ranges:\n");
	for_each_physmem_reserved_range(t, range, &start, &end) {
		boot_emerg("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
		total_reserved_mem += end - start;
	}
	boot_emerg("Usable online memory ranges (info source: %s [%d]):\n",
		   get_physmem_info_source(), physmem_info.info_source);
	for_each_physmem_usable_range(i, &start, &end) {
		boot_emerg("%016lx %016lx\n", start, end);
		total_mem += end - start;
	}
	boot_emerg("Usable online memory total: %lu Reserved: %lu Free: %lu\n",
		   total_mem, total_reserved_mem,
		   total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
	print_stacktrace(current_frame_address());
	boot_emerg(" -- System halted\n");
	disabled_wait();
}

static void _physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{
	physmem_info.reserved[type].start = addr;
	physmem_info.reserved[type].end = addr + size;
}

void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{
	_physmem_reserve(type, addr, size);
	boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Reserve:", addr, addr + size,
		   get_rr_type_name(type));
}

void physmem_free(enum reserved_range_type type)
{
	boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Free:", physmem_info.reserved[type].start,
		   physmem_info.reserved[type].end, get_rr_type_name(type));
	physmem_info.reserved[type].start = 0;
	physmem_info.reserved[type].end = 0;
}

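/*
 * Check whether [addr, addr + size) overlaps any reserved range or the IPL
 * report certificate area. If so, store the start of the conflicting region
 * in *intersection_start so the caller can retry below it.
 */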
static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
				       unsigned long *intersection_start)
{
	unsigned long res_addr, res_size;
	int t;

	for (t = 0; t < RR_MAX; t++) {
		if (!get_physmem_reserved(t, &res_addr, &res_size))
			continue;
		if (intersects(addr, size, res_addr, res_size)) {
			*intersection_start = res_addr;
			return true;
		}
	}
	return ipl_report_certs_intersects(addr, size, intersection_start);
}

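/*
 * Top-down allocation: walk the online ranges from the highest one downwards
 * and return the highest address in [min, max] where an aligned block of the
 * requested size fits without intersecting any reserved region. When
 * from_ranges is non-zero only the first from_ranges online ranges are
 * considered; the number of ranges still usable for subsequent allocations
 * is returned via ranges_left.
 */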
static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
					   unsigned long min, unsigned long max,
					   unsigned int from_ranges, unsigned int *ranges_left,
					   bool die_on_oom)
{
	unsigned int nranges = from_ranges ?: physmem_info.range_count;
	unsigned long range_start, range_end;
	unsigned long intersection_start;
	unsigned long addr, pos = max;

	align = max(align, 8UL);
	while (nranges) {
		__get_physmem_range(nranges - 1, &range_start, &range_end, false);
		pos = min(range_end, pos);

		if (round_up(min, align) + size > pos)
			break;
		addr = round_down(pos - size, align);
		if (range_start > addr) {
			nranges--;
			continue;
		}
		if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
			pos = intersection_start;
			continue;
		}

		if (ranges_left)
			*ranges_left = nranges;
		return addr;
	}
	if (die_on_oom)
		die_oom(size, align, min, max);
	return 0;
}

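/*
 * Allocate a block of the given size and alignment within [min, max] (capped
 * at the current allocator position) and reserve it under the given range
 * type. Returns the allocated address, or 0 if nothing fits and die_on_oom
 * is false.
 */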
unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
				  unsigned long align, unsigned long min, unsigned long max,
				  bool die_on_oom)
{
	unsigned long addr;

	max = min(max, physmem_alloc_pos);
	addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
	if (addr)
		_physmem_reserve(type, addr, size);
	boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Alloc range:", addr, addr + size,
		   get_rr_type_name(type));
	return addr;
}

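/*
 * Top-down bump allocator that keeps growing the reserved range of the given
 * type. If the new block is not directly adjacent to the current reservation,
 * a new struct reserved_range is allocated and chained to the existing one,
 * so non-contiguous allocations of the same type stay tracked.
 */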
unsigned long physmem_alloc(enum reserved_range_type type, unsigned long size,
			    unsigned long align, bool die_on_oom)
{
	struct reserved_range *range = &physmem_info.reserved[type];
	struct reserved_range *new_range = NULL;
	unsigned int ranges_left;
	unsigned long addr;

	addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
				     &ranges_left, die_on_oom);
	if (!addr)
		return 0;
	/* if not a consecutive allocation of the same type or first allocation */
	if (range->start != addr + size) {
		if (range->end) {
			addr = __physmem_alloc_range(sizeof(struct reserved_range), 0, 0,
						     physmem_alloc_pos, physmem_alloc_ranges,
						     &ranges_left, true);
			new_range = (struct reserved_range *)addr;
			addr = __physmem_alloc_range(size, align, 0, addr, ranges_left,
						     &ranges_left, die_on_oom);
			if (!addr)
				return 0;
			*new_range = *range;
			range->chain = new_range;
		}
		range->end = addr + size;
	}
	if (type != RR_VMEM) {
		boot_debug("%-14s 0x%016lx-0x%016lx %-20s align 0x%lx split %d\n", "Alloc topdown:",
			   addr, addr + size, get_rr_type_name(type), align, !!new_range);
	}
	range->start = addr;
	physmem_alloc_pos = addr;
	physmem_alloc_ranges = ranges_left;
	return addr;
}

unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size,
				   unsigned long align)
{
	return physmem_alloc(type, size, align, true);
}

unsigned long get_physmem_alloc_pos(void)
{
	return physmem_alloc_pos;
}

void dump_physmem_reserved(void)
{
	struct reserved_range *range;
	enum reserved_range_type t;
	unsigned long start, end;

	boot_debug("Reserved memory ranges:\n");
	for_each_physmem_reserved_range(t, range, &start, &end) {
		if (end) {
			boot_debug("%-14s 0x%016lx-0x%016lx @%012lx chain %012lx\n",
				   get_rr_type_name(t), start, end, (unsigned long)range,
				   (unsigned long)range->chain);
		}
	}
}