Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/kernel/efi.c
26292 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright (C) 2015 Linaro Ltd <[email protected]>
4
*/
5
6
#include <linux/efi.h>
7
#include <linux/memblock.h>
8
#include <linux/screen_info.h>
9
10
#include <asm/efi.h>
11
#include <asm/mach/map.h>
12
#include <asm/mmu_context.h>
13
14
/*
 * Page-table walker callback: tighten the permissions of a single PTE
 * according to the attributes of the EFI memory descriptor passed via @data.
 * Registered with apply_to_page_range() by efi_set_mapping_permissions().
 */
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	efi_memory_desc_t *desc = data;
	pte_t entry = *ptep;

	if (desc->attribute & EFI_MEMORY_RO)
		entry = set_pte_bit(entry, __pgprot(L_PTE_RDONLY));
	if (desc->attribute & EFI_MEMORY_XP)
		entry = set_pte_bit(entry, __pgprot(L_PTE_XN));

	/* Write the updated entry back as a non-global mapping. */
	set_pte_ext(ptep, entry, PTE_EXT_NG);
	return 0;
}
26
27
/*
 * Apply the RO/XP attributes of @md to its already-established virtual
 * mapping by rewriting the individual PTEs covering the region.
 *
 * Returns 0 on success (including the no-op case below) or the error
 * propagated from apply_to_page_range().
 */
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md,
				       bool ignored)
{
	unsigned long start = md->virt_addr;
	unsigned long len = md->num_pages << EFI_PAGE_SHIFT;

	/*
	 * We can only use apply_to_page_range() if we can guarantee that the
	 * entire region was mapped using pages. This should be the case if the
	 * region does not cover any naturally aligned SECTION_SIZE sized
	 * blocks.
	 */
	if (round_down(start + len, SECTION_SIZE) >=
	    round_up(start, SECTION_SIZE) + SECTION_SIZE)
		return 0;

	return apply_to_page_range(mm, start, len, set_permissions, md);
}
48
49
/*
 * Map the EFI memory region described by @md into @mm, choosing the memory
 * type from the region's cacheability attributes, then tightening the
 * permissions if the descriptor requests read-only and/or no-execute.
 *
 * Returns 0 on success or an error from efi_set_mapping_permissions().
 */
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	struct map_desc map;

	map.virtual = md->virt_addr;
	map.pfn = __phys_to_pfn(md->phys_addr);
	map.length = md->num_pages * EFI_PAGE_SIZE;

	/*
	 * Order is important here: memory regions may have all of the
	 * bits below set (and usually do), so we check them in order of
	 * preference.
	 */
	if (md->attribute & EFI_MEMORY_WB)
		map.type = MT_MEMORY_RWX;
	else if (md->attribute & EFI_MEMORY_WT)
		map.type = MT_MEMORY_RWX_NONCACHED;
	else if (md->attribute & EFI_MEMORY_WC)
		map.type = MT_DEVICE_WC;
	else
		map.type = MT_DEVICE;

	create_mapping_late(mm, &map, true);

	/*
	 * If stricter permissions were specified, apply them now.
	 */
	if (!(md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP)))
		return 0;

	return efi_set_mapping_permissions(mm, md, false);
}
80
81
/*
 * Physical address of the firmware-provided CPU entry state table, filled in
 * by the generic EFI config-table scan via efi_arch_tables[] below. Remains
 * EFI_INVALID_TABLE_ADDR when the firmware did not publish the table.
 */
static unsigned long __initdata cpu_state_table = EFI_INVALID_TABLE_ADDR;

/* Architecture-specific EFI config tables we want resolved at init time. */
const efi_config_table_type_t efi_arch_tables[] __initconst = {
	{LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
	{}
};
87
88
/*
 * If the EFI stub published a CPU entry state table, map it and sanity-check
 * the recorded SCTLR values: the MMU/D-cache (SCTLR bit 0) should have been
 * enabled both on entry to the stub and after ExitBootServices(). Warn about
 * firmware that violates this, and dump the raw state when something looks
 * wrong or EFI debugging is enabled.
 */
static void __init load_cpu_state_table(void)
{
	struct efi_arm_entry_state *state;
	bool dump_state = true;

	if (cpu_state_table == EFI_INVALID_TABLE_ADDR)
		return;

	state = early_memremap_ro(cpu_state_table,
				  sizeof(struct efi_arm_entry_state));
	if (!state) {
		pr_warn("Unable to map CPU entry state table.\n");
		return;
	}

	if ((state->sctlr_before_ebs & 1) == 0)
		pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
	else if ((state->sctlr_after_ebs & 1) == 0)
		pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
	else
		dump_state = false;

	if (dump_state || efi_enabled(EFI_DBG)) {
		pr_info("CPSR at EFI stub entry : 0x%08x\n",
			state->cpsr_before_ebs);
		pr_info("SCTLR at EFI stub entry : 0x%08x\n",
			state->sctlr_before_ebs);
		pr_info("CPSR after ExitBootServices() : 0x%08x\n",
			state->cpsr_after_ebs);
		pr_info("SCTLR after ExitBootServices(): 0x%08x\n",
			state->sctlr_after_ebs);
	}

	early_memunmap(state, sizeof(struct efi_arm_entry_state));
}
121
122
/*
 * Arch-level EFI initialization entry point: run the generic EFI setup,
 * tear down the early memory-map mappings, then consume the firmware's
 * CPU entry state table. The call order below is deliberate.
 */
void __init arm_efi_init(void)
{
	efi_init();

	/* ARM does not permit early mappings to persist across paging_init() */
	efi_memmap_unmap();

	load_cpu_state_table();
}
131
132