GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/ia64/kernel/efi.c
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9
 * April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <[email protected]>
 * Copyright (C) 1999-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <[email protected]>
 *	Stephane Eranian <[email protected]>
 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <[email protected]>
 *
 * Not all EFI Runtime Services are implemented yet, as EFI only
 * supports physical mode addressing on SoftSDV.  This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <[email protected]>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/crash_dump.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/efi.h>
#include <linux/kexec.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca.h>
#include <asm/tlbflush.h>

#define EFI_DEBUG	0

extern efi_status_t efi_call_phys (void *, ...);

struct efi efi;
EXPORT_SYMBOL(efi);
static efi_runtime_services_t *runtime;
static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;

#define efi_call_virt(f, args...)	(*(f))(args)

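/*
 * Each STUB_* macro below expands to a thin wrapper around one EFI
 * runtime service.  The wrapper saves and restores the scratch
 * floating-point registers around the firmware call (firmware may
 * clobber them) and passes every pointer argument through adjust_arg().
 * Instantiating a stub with (phys, phys_ptr) yields a wrapper that
 * calls firmware in physical mode with ia64_tpa()-translated pointers;
 * (virt, id) yields the same wrapper with pointers passed through
 * unchanged.
 */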
#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_cap_t *atc = NULL; \
	efi_status_t ret; \
 \
	if (tc) \
		atc = adjust_arg(tc); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \
				adjust_arg(tm), atc); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \
				adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \
			  efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
		adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_t *atm = NULL; \
	efi_status_t ret; \
 \
	if (tm) \
		atm = adjust_arg(tm); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
		enabled, atm); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
		       unsigned long *data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	u32 *aattr = NULL; \
	efi_status_t ret; \
 \
	if (attr) \
		aattr = adjust_arg(attr); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_variable_t *) __va(runtime->get_variable), \
		adjust_arg(name), adjust_arg(vendor), aattr, \
		adjust_arg(data_size), adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
			    efi_guid_t *vendor) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_next_variable_t *) __va(runtime->get_next_variable), \
		adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
		       unsigned long attr, unsigned long data_size, \
		       void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_set_variable_t *) __va(runtime->set_variable), \
		adjust_arg(name), adjust_arg(vendor), attr, data_size, \
		adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
				__va(runtime->get_next_high_mono_count), \
				adjust_arg(count)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
		       unsigned long data_size, efi_char16_t *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_char16_t *adata = NULL; \
 \
	if (data) \
		adata = adjust_arg(data); \
 \
	ia64_save_scratch_fpregs(fr); \
	efi_call_##prefix( \
		(efi_reset_system_t *) __va(runtime->reset_system), \
		reset_type, status, data_size, adata); \
	/* should not return, but just in case... */ \
	ia64_load_scratch_fpregs(fr); \
}

#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

#define id(arg)	arg

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)
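
/*
 * For illustration: STUB_GET_TIME(phys, phys_ptr) expands to a function
 * phys_get_time(tm, tc) that invokes the firmware's get_time entry
 * point via efi_call_phys() with tm and tc converted to physical
 * addresses, while virt_get_time() is the same body with the pointers
 * left untouched.
 */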
void
efi_gettimeofday (struct timespec *ts)
{
	efi_time_t tm;

	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) {
		memset(ts, 0, sizeof(*ts));
		return;
	}

	ts->tv_sec = mktime(tm.year, tm.month, tm.day,
			    tm.hour, tm.minute, tm.second);
	ts->tv_nsec = tm.nanosecond;
}

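/*
 * Return 1 if MD describes write-back cacheable memory of a type the
 * OS is allowed to claim (loader, boot services, or conventional
 * memory), 0 otherwise.
 */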
static int
is_memory_available (efi_memory_desc_t *md)
{
	if (!(md->attribute & EFI_MEMORY_WB))
		return 0;

	switch (md->type) {
	      case EFI_LOADER_CODE:
	      case EFI_LOADER_DATA:
	      case EFI_BOOT_SERVICES_CODE:
	      case EFI_BOOT_SERVICES_DATA:
	      case EFI_CONVENTIONAL_MEMORY:
		return 1;
	}
	return 0;
}

typedef struct kern_memdesc {
	u64 attribute;
	u64 start;
	u64 num_pages;
} kern_memdesc_t;

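/*
 * Kernel-usable memory ranges derived from the EFI memory map; the
 * array is terminated by an entry whose start is ~0UL (set up in
 * efi_memmap_init() below).
 */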
static kern_memdesc_t *kern_memmap;

#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)

static inline u64
kmd_end(kern_memdesc_t *kmd)
{
	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
}

static inline u64
efi_md_end(efi_memory_desc_t *md)
{
	return (md->phys_addr + efi_md_size(md));
}

static inline int
efi_wb(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_WB);
}

static inline int
efi_uc(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_UC);
}

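/*
 * Invoke CALLBACK on each page-aligned [start, end) chunk of the
 * kern_memmap entries whose attribute equals ATTR, with physical
 * addresses converted to kernel virtual addresses through the cached
 * (PAGE_OFFSET) or uncached (__IA64_UNCACHED_OFFSET) identity mapping.
 */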
static void
walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
	kern_memdesc_t *k;
	u64 start, end, voff;

	voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
	for (k = kern_memmap; k->start != ~0UL; k++) {
		if (k->attribute != attr)
			continue;
		start = PAGE_ALIGN(k->start);
		end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
		if (start < end)
			if ((*callback)(start + voff, end + voff, arg) < 0)
				return;
	}
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory that is available for OS use.
 */
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_WB);
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory that is available for uncached allocator.
 */
void
efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_UC);
}

/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer chapter 11 in ADAG
 */
void *
efi_get_pal_addr (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	int pal_code_count = 0;
	u64 vaddr, mask;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type != EFI_PAL_CODE)
			continue;

		if (++pal_code_count > 1) {
			printk(KERN_ERR "Too many EFI Pal Code memory ranges, "
			       "dropped @ %llx\n", md->phys_addr);
			continue;
		}
		/*
		 * The only ITLB entry in region 7 that is used is the one
		 * installed by __start().  That entry covers a 64MB range.
		 */
		mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
		vaddr = PAGE_OFFSET + md->phys_addr;

		/*
		 * We must check that the PAL mapping won't overlap with the
		 * kernel mapping.
		 *
		 * PAL code is guaranteed to be aligned on a power of 2 between
		 * 4k and 256KB and that only one ITR is needed to map it. This
		 * implies that the PAL code is always aligned on its size,
		 * i.e., the closest matching page size supported by the TLB.
		 * Therefore PAL code is guaranteed never to cross a 64MB
		 * boundary unless it is bigger than 64MB (very unlikely!).
		 * So for now the following test is enough to determine whether
		 * or not we need a dedicated ITR for the PAL code.
		 */
		if ((vaddr & mask) == (KERNEL_START & mask)) {
			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
			       __func__);
			continue;
		}

		if (efi_md_size(md) > IA64_GRANULE_SIZE)
			panic("Whoa!  PAL code size bigger than a granule!");

#if EFI_DEBUG
		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);

		printk(KERN_INFO "CPU %d: mapping PAL code "
		       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
		       smp_processor_id(), md->phys_addr,
		       md->phys_addr + efi_md_size(md),
		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
		return __va(md->phys_addr);
	}
	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
	       __func__);
	return NULL;
}

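/*
 * 8-bit sum over LENGTH bytes of BUFFER; a PALO table is consistent
 * when the bytes sum to zero.
 */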
static u8 __init palo_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum = (u8) (sum + *(buffer++));

	return sum;
}

/*
 * Parse and handle PALO table which is published at:
 * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
 */
static void __init handle_palo(unsigned long palo_phys)
{
	struct palo_table *palo = __va(palo_phys);
	u8 checksum;

	if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
		printk(KERN_INFO "PALO signature incorrect.\n");
		return;
	}

	checksum = palo_checksum((u8 *)palo, palo->length);
	if (checksum) {
		printk(KERN_INFO "PALO checksum incorrect.\n");
		return;
	}

	setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
}

void
efi_map_pal_code (void)
{
	void *pal_vaddr = efi_get_pal_addr ();
	u64 psr;

	if (!pal_vaddr)
		return;

	/*
	 * Cannot write to CRx with PSR.ic=1
	 */
	psr = ia64_clear_ic();
	ia64_itr(0x1, IA64_TR_PALCODE,
		 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
		 IA64_GRANULE_SHIFT);
	paravirt_dv_serialize_data();
	ia64_set_psr(psr);		/* restore psr */
}

void __init
efi_init (void)
{
	void *efi_map_start, *efi_map_end;
	efi_config_table_t *config_tables;
	efi_char16_t *c16;
	u64 efi_desc_size;
	char *cp, vendor[100] = "unknown";
	int i;
	unsigned long palo_phys;

	/*
	 * It's too early to be able to use the standard kernel command line
	 * support...
	 */
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			mem_limit = memparse(cp + 4, &cp);
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else if (memcmp(cp, "min_addr=", 9) == 0) {
			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}
	if (min_addr != 0UL)
		printk(KERN_INFO "Ignoring memory below %lluMB\n",
		       min_addr >> 20);
	if (max_addr != ~0UL)
		printk(KERN_INFO "Ignoring memory above %lluMB\n",
		       max_addr >> 20);

	efi.systab = __va(ia64_boot_param->efi_systab);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab == NULL)
		panic("Whoa! Can't find EFI system table.\n");
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
		panic("Whoa! EFI system table signature incorrect\n");
	if ((efi.systab->hdr.revision >> 16) == 0)
		printk(KERN_WARNING "Warning: EFI system table version "
		       "%d.%02d, expected 1.00 or greater\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	config_tables = __va(efi.systab->tables);

	/* Show what we know for posterity */
	c16 = __va(efi.systab->fw_vendor);
	if (c16) {
		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
	}

	printk(KERN_INFO "EFI v%u.%.02u by %s:",
	       efi.systab->hdr.revision >> 16,
	       efi.systab->hdr.revision & 0xffff, vendor);

	efi.mps        = EFI_INVALID_TABLE_ADDR;
	efi.acpi       = EFI_INVALID_TABLE_ADDR;
	efi.acpi20     = EFI_INVALID_TABLE_ADDR;
	efi.smbios     = EFI_INVALID_TABLE_ADDR;
	efi.sal_systab = EFI_INVALID_TABLE_ADDR;
	efi.boot_info  = EFI_INVALID_TABLE_ADDR;
	efi.hcdp       = EFI_INVALID_TABLE_ADDR;
	efi.uga        = EFI_INVALID_TABLE_ADDR;

	palo_phys      = EFI_INVALID_TABLE_ADDR;

	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
			efi.mps = config_tables[i].table;
			printk(" MPS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
			efi.acpi20 = config_tables[i].table;
			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
			efi.acpi = config_tables[i].table;
			printk(" ACPI=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
			efi.smbios = config_tables[i].table;
			printk(" SMBIOS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
			efi.sal_systab = config_tables[i].table;
			printk(" SALsystab=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
			efi.hcdp = config_tables[i].table;
			printk(" HCDP=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid,
			 PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
			palo_phys = config_tables[i].table;
			printk(" PALO=0x%lx", config_tables[i].table);
		}
	}
	printk("\n");

	if (palo_phys != EFI_INVALID_TABLE_ADDR)
		handle_palo(palo_phys);

	runtime = __va(efi.systab->runtime);
	efi.get_time = phys_get_time;
	efi.set_time = phys_set_time;
	efi.get_wakeup_time = phys_get_wakeup_time;
	efi.set_wakeup_time = phys_set_wakeup_time;
	efi.get_variable = phys_get_variable;
	efi.get_next_variable = phys_get_next_variable;
	efi.set_variable = phys_set_variable;
	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
	efi.reset_system = phys_reset_system;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
	/* print EFI memory map: */
	{
		efi_memory_desc_t *md;
		void *p;

		for (i = 0, p = efi_map_start; p < efi_map_end;
		     ++i, p += efi_desc_size)
		{
			const char *unit;
			unsigned long size;

			md = p;
			size = md->num_pages << EFI_PAGE_SHIFT;

			if ((size >> 40) > 0) {
				size >>= 40;
				unit = "TB";
			} else if ((size >> 30) > 0) {
				size >>= 30;
				unit = "GB";
			} else if ((size >> 20) > 0) {
				size >>= 20;
				unit = "MB";
			} else {
				size >>= 10;
				unit = "KB";
			}

			printk("mem%02d: type=%2u, attr=0x%016lx, "
			       "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
			       i, md->type, md->attribute, md->phys_addr,
			       md->phys_addr + efi_md_size(md), size, unit);
		}
	}
#endif

	efi_map_pal_code();
	efi_enter_virtual_mode();
}

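/*
 * Switch firmware from physical to virtual addressing: assign a kernel
 * virtual address to every descriptor marked EFI_MEMORY_RUNTIME, issue
 * the one-time SetVirtualAddressMap() call, and repoint the efi
 * function table at the virt_* stubs.
 */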
void
efi_enter_virtual_mode (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	efi_status_t status;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			/*
			 * Some descriptors have multiple bits set, so the
			 * order of the tests is relevant.
			 */
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr,
							   (_PAGE_A |
							    _PAGE_P |
							    _PAGE_D |
							    _PAGE_MA_WC |
							    _PAGE_PL_0 |
							    _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr,
							   (_PAGE_A |
							    _PAGE_P |
							    _PAGE_D |
							    _PAGE_MA_WT |
							    _PAGE_PL_0 |
							    _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			}
		}
	}

	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size,
			       ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "warning: unable to switch EFI into "
		       "virtual mode (status=%lu)\n", status);
		return;
	}

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more
	 * efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
}

/*
 * Walk the EFI memory map looking for the I/O port range.  There can only be
 * one entry of this type, other I/O port ranges should be described via ACPI.
 */
u64
efi_get_iobase (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
		}
	}
	return 0;
}

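/* Return the kern_memmap entry containing phys_addr, or NULL if none. */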
static struct kern_memdesc *
kern_memory_descriptor (unsigned long phys_addr)
{
	struct kern_memdesc *md;

	for (md = kern_memmap; md->start != ~0UL; md++) {
		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
			 return md;
	}
	return NULL;
}

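/* Return the EFI memory descriptor containing phys_addr, or NULL if none. */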
static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < efi_md_size(md))
			 return md;
	}
	return NULL;
}

static int
efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	unsigned long end;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	end = phys_addr + size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->phys_addr < end && efi_md_end(md) > phys_addr)
			return 1;
	}
	return 0;
}

u32
efi_mem_type (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->type;
	return 0;
}

u64
efi_mem_attributes (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->attribute;
	return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);

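/*
 * Return the attributes (with EFI_MEMORY_RUNTIME masked off) common to
 * the entire range [phys_addr, phys_addr + size), following adjacent
 * descriptors if necessary; return 0 if the attributes differ or the
 * range is not fully described by the memory map.
 */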
u64
efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
	u64 attr;

	if (!md)
		return 0;

	/*
	 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
	 * the kernel that firmware needs this region mapped.
	 */
	attr = md->attribute & ~EFI_MEMORY_RUNTIME;
	do {
		unsigned long md_end = efi_md_end(md);

		if (end <= md_end)
			return attr;

		md = efi_memory_descriptor(md_end);
		if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
			return 0;
	} while (md);
	return 0;	/* never reached */
}

u64
kern_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	struct kern_memdesc *md;
	u64 attr;

	/*
	 * This is a hack for ioremap calls before we set up kern_memmap.
	 * Maybe we should do efi_memmap_init() earlier instead.
	 */
	if (!kern_memmap) {
		attr = efi_mem_attribute(phys_addr, size);
		if (attr & EFI_MEMORY_WB)
			return EFI_MEMORY_WB;
		return 0;
	}

	md = kern_memory_descriptor(phys_addr);
	if (!md)
		return 0;

	attr = md->attribute;
	do {
		unsigned long md_end = kmd_end(md);

		if (end <= md_end)
			return attr;

		md = kern_memory_descriptor(md_end);
		if (!md || md->attribute != attr)
			return 0;
	} while (md);
	return 0;	/* never reached */
}
EXPORT_SYMBOL(kern_mem_attribute);

int
valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
	u64 attr;

	/*
	 * /dev/mem reads and writes use copy_to_user(), which implicitly
	 * uses a granule-sized kernel identity mapping.  It's really
	 * only safe to do this for regions in kern_memmap.  For more
	 * details, see Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
		return 1;
	return 0;
}

int
valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	attr = efi_mem_attribute(phys_addr, size);

	/*
	 * /dev/mem mmap uses normal user pages, so we don't need the entire
	 * granule, but the entire region we're mapping must support the same
	 * attribute.
	 */
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
		return 1;

	/*
	 * Intel firmware doesn't tell us about all the MMIO regions, so
	 * in general we have to allow mmap requests.  But if EFI *does*
	 * tell us about anything inside this region, we should deny it.
	 * The user can always map a smaller region to avoid the overlap.
	 */
	if (efi_memmap_intersects(phys_addr, size))
		return 0;

	return 1;
}

pgprot_t
phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
		     pgprot_t vma_prot)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	/*
	 * For /dev/mem mmap, we use user mappings, but if the region is
	 * in kern_memmap (and hence may be covered by a kernel mapping),
	 * we must use the same attribute as the kernel mapping.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);
	else if (attr & EFI_MEMORY_UC)
		return pgprot_noncached(vma_prot);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported, we prefer that.
	 */
	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);

	return pgprot_noncached(vma_prot);
}

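/*
 * Return 1 if the EFI ConOut variable describes only UART devices,
 * i.e. the firmware console is exclusively serial.
 */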
int __init
efi_uart_console_only(void)
{
	efi_status_t status;
	char *s, name[] = "ConOut";
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	efi_char16_t *utf16, name_utf16[32];
	unsigned char data[1024];
	unsigned long size = sizeof(data);
	struct efi_generic_dev_path *hdr, *end_addr;
	int uart = 0;

	/* Convert to UTF-16 */
	utf16 = name_utf16;
	s = name;
	while (*s)
		*utf16++ = *s++ & 0x7f;
	*utf16 = 0;

	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
	if (status != EFI_SUCCESS) {
		printk(KERN_ERR "No EFI %s variable?\n", name);
		return 0;
	}

	hdr = (struct efi_generic_dev_path *) data;
	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
	while (hdr < end_addr) {
		if (hdr->type == EFI_DEV_MSG &&
		    hdr->sub_type == EFI_DEV_MSG_UART)
			uart = 1;
		else if (hdr->type == EFI_DEV_END_PATH ||
			 hdr->type == EFI_DEV_END_PATH2) {
			if (!uart)
				return 0;
			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
				return 1;
			uart = 0;
		}
		hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length);
	}
	printk(KERN_ERR "Malformed %s value\n", name);
	return 0;
}

/*
 * Look for the first granule-aligned memory descriptor that is big
 * enough to hold the EFI memory map.  Make sure this descriptor is at
 * least granule sized so it does not get trimmed.
 */
struct kern_memdesc *
find_memmap_space (void)
{
	u64 contig_low = 0, contig_high = 0;
	u64 as = 0, ae;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64 space_needed, efi_desc_size;
	unsigned long total_mem = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/*
	 * Worst case: we need 3 kernel descriptors for each efi descriptor
	 * (if every entry has a WB part in the middle, and UC head and tail),
	 * plus one for the end marker.
	 */
	space_needed = sizeof(kern_memdesc_t) *
		(3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			continue;
		}
		if (pmd == NULL || !efi_wb(pmd) ||
		    efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end;
			     q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
			continue;

		/* Round ends inward to granule boundaries */
		as = max(contig_low, md->phys_addr);
		ae = min(contig_high, efi_md_end(md));

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;

		if (ae - as > space_needed)
			break;
	}
	if (p >= efi_map_end)
		panic("Can't allocate space for kernel memory descriptors");

	return __va(as);
}

/*
 * Walk the EFI memory map and gather all memory available for kernel
 * to use.  We can allocate partial granules only if the unavailable
 * parts exist, and are WB.
 */
unsigned long
efi_memmap_init(u64 *s, u64 *e)
{
	struct kern_memdesc *k, *prev = NULL;
	u64 contig_low = 0, contig_high = 0;
	u64 as, ae, lim;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64 efi_desc_size;
	unsigned long total_mem = 0;

	k = kern_memmap = find_memmap_space();

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			if (efi_uc(md) &&
			    (md->type == EFI_CONVENTIONAL_MEMORY ||
			     md->type == EFI_BOOT_SERVICES_DATA)) {
				k->attribute = EFI_MEMORY_UC;
				k->start = md->phys_addr;
				k->num_pages = md->num_pages;
				k++;
			}
			continue;
		}
		if (pmd == NULL || !efi_wb(pmd) ||
		    efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end;
			     q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md))
			continue;

#ifdef CONFIG_CRASH_DUMP
		/* saved_max_pfn should ignore max_addr= command line arg */
		if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
			saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
#endif
		/*
		 * Round ends inward to granule boundaries
		 * Give trimmings to uncached allocator
		 */
		if (md->phys_addr < contig_low) {
			lim = min(efi_md_end(md), contig_low);
			if (efi_uc(md)) {
				if (k > kern_memmap &&
				    (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages +=
						(lim - md->phys_addr)
						>> EFI_PAGE_SHIFT;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = md->phys_addr;
					k->num_pages = (lim - md->phys_addr)
						>> EFI_PAGE_SHIFT;
					k++;
				}
			}
			as = contig_low;
		} else
			as = md->phys_addr;

		if (efi_md_end(md) > contig_high) {
			lim = max(md->phys_addr, contig_high);
			if (efi_uc(md)) {
				if (lim == md->phys_addr && k > kern_memmap &&
				    (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages += md->num_pages;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = lim;
					k->num_pages = (efi_md_end(md) - lim)
						>> EFI_PAGE_SHIFT;
					k++;
				}
			}
			ae = contig_high;
		} else
			ae = efi_md_end(md);

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;
		if (prev && kmd_end(prev) == md->phys_addr) {
			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
			total_mem += ae - as;
			continue;
		}
		k->attribute = EFI_MEMORY_WB;
		k->start = as;
		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
		total_mem += ae - as;
		prev = k++;
	}
	k->start = ~0L;	/* end-marker */

	/* reserve the memory we are using for kern_memmap */
	*s = (u64)kern_memmap;
	*e = (u64)++k;

	return total_mem;
}

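/*
 * Register the EFI memory map with the resource tree: build a struct
 * resource for each descriptor and nest the kernel's code, data, and
 * bss resources (plus kexec/crash resources when configured) inside
 * the matching "System RAM" ranges.
 */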
void
efi_initialize_iomem_resources(struct resource *code_resource,
			       struct resource *data_resource,
			       struct resource *bss_resource)
{
	struct resource *res;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	char *name;
	unsigned long flags;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	res = NULL;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (md->num_pages == 0) /* should not happen */
			continue;

		flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		switch (md->type) {

			case EFI_MEMORY_MAPPED_IO:
			case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
				continue;

			case EFI_LOADER_CODE:
			case EFI_LOADER_DATA:
			case EFI_BOOT_SERVICES_DATA:
			case EFI_BOOT_SERVICES_CODE:
			case EFI_CONVENTIONAL_MEMORY:
				if (md->attribute & EFI_MEMORY_WP) {
					name = "System ROM";
					flags |= IORESOURCE_READONLY;
				} else if (md->attribute == EFI_MEMORY_UC)
					name = "Uncached RAM";
				else
					name = "System RAM";
				break;

			case EFI_ACPI_MEMORY_NVS:
				name = "ACPI Non-volatile Storage";
				break;

			case EFI_UNUSABLE_MEMORY:
				name = "reserved";
				flags |= IORESOURCE_DISABLED;
				break;

			case EFI_RESERVED_TYPE:
			case EFI_RUNTIME_SERVICES_CODE:
			case EFI_RUNTIME_SERVICES_DATA:
			case EFI_ACPI_RECLAIM_MEMORY:
			default:
				name = "reserved";
				break;
		}

		if ((res = kzalloc(sizeof(struct resource),
				   GFP_KERNEL)) == NULL) {
			printk(KERN_ERR
			       "failed to allocate resource for iomem\n");
			return;
		}

		res->name = name;
		res->start = md->phys_addr;
		res->end = md->phys_addr + efi_md_size(md) - 1;
		res->flags = flags;

		if (insert_resource(&iomem_resource, res) < 0)
			kfree(res);
		else {
			/*
			 * We don't know which region contains
			 * kernel data so we try it repeatedly and
			 * let the resource manager test it.
			 */
			insert_resource(res, code_resource);
			insert_resource(res, data_resource);
			insert_resource(res, bss_resource);
#ifdef CONFIG_KEXEC
			insert_resource(res, &efi_memmap_res);
			insert_resource(res, &boot_param_res);
			if (crashk_res.end > crashk_res.start)
				insert_resource(res, &crashk_res);
#endif
		}
	}
}

#ifdef CONFIG_KEXEC
/*
 * Find a block of memory aligned to 64M that excludes the (sorted)
 * reserved regions in rsvd_regions.
 */
unsigned long __init
kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n)
{
	int i;
	u64 start, end;
	u64 alignment = 1UL << _PAGE_SIZE_64M;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (!efi_wb(md))
			continue;
		start = ALIGN(md->phys_addr, alignment);
		end = efi_md_end(md);
		for (i = 0; i < n; i++) {
			if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
				if (__pa(r[i].start) > start + size)
					return start;
				start = ALIGN(__pa(r[i].end), alignment);
				if (i < n-1 &&
				    __pa(r[i+1].start) < start + size)
					continue;
				else
					break;
			}
		}
		if (end > start + size)
			return start;
	}

	printk(KERN_WARNING
	       "Cannot reserve 0x%lx byte of memory for crashdump\n", size);
	return ~0UL;
}
#endif

#ifdef CONFIG_CRASH_DUMP
/* Locate the size of the EFI memory descriptor at a given address. */
unsigned long __init
vmcore_find_descriptor_size (unsigned long address)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	unsigned long ret = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (efi_wb(md) && md->type == EFI_LOADER_DATA
		    && md->phys_addr == address) {
			ret = efi_md_size(md);
			break;
		}
	}

	if (ret == 0)
		printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");

	return ret;
}
#endif