GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/boot/startup/sev-startup.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <[email protected]>
 */

#define pr_fmt(fmt) "SEV: " fmt

#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
#include <uapi/linux/sev-guest.h>

#include <asm/init.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid/api.h>
#include <asm/cmdline.h>

#include "../../coco/sev/internal.h"

/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"

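/*
 * Request the page-state change described by @desc for each page in the
 * given range. Addresses are masked down to page boundaries and the
 * virtual/physical pair advances one PAGE_SIZE step per iteration.
 */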
void
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
                      unsigned long npages, const struct psc_desc *desc)
{
        unsigned long paddr_end;

        vaddr = vaddr & PAGE_MASK;

        paddr = paddr & PAGE_MASK;
        paddr_end = paddr + (npages << PAGE_SHIFT);

        while (paddr < paddr_end) {
                __page_state_change(vaddr, paddr, desc);

                vaddr += PAGE_SIZE;
                paddr += PAGE_SIZE;
        }
}

void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
                                         unsigned long npages)
{
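        /*
         * Describe the requested page-state change: the target RMP state
         * plus the boot SVSM calling area used when an SVSM is present.
         */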
        struct psc_desc d = {
                SNP_PAGE_STATE_PRIVATE,
                rip_rel_ptr(&boot_svsm_ca_page),
                boot_svsm_caa_pa
        };

        /*
         * This can be invoked in early boot while running identity mapped, so
         * use an open-coded check for SNP instead of using cc_platform_has().
         * This eliminates worries about jump tables or checking boot_cpu_data
         * in the cc_platform_has() function.
         */
        if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
                return;

        /*
         * Ask the hypervisor to mark the memory pages as private in the RMP
         * table.
         */
        early_set_pages_state(vaddr, paddr, npages, &d);
}

void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
                                        unsigned long npages)
{
        struct psc_desc d = {
                SNP_PAGE_STATE_SHARED,
                rip_rel_ptr(&boot_svsm_ca_page),
                boot_svsm_caa_pa
        };

        /*
         * This can be invoked in early boot while running identity mapped, so
         * use an open-coded check for SNP instead of using cc_platform_has().
         * This eliminates worries about jump tables or checking boot_cpu_data
         * in the cc_platform_has() function.
         */
        if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
                return;

        /* Ask the hypervisor to mark the memory pages as shared in the RMP table. */
        early_set_pages_state(vaddr, paddr, npages, &d);
}

/*
 * Initial setup of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static struct cc_blob_sev_info *__init find_cc_blob(struct boot_params *bp)
{
        struct cc_blob_sev_info *cc_info;

        /* The boot kernel would have passed the CC blob via boot_params. */
        if (bp->cc_blob_address) {
                cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
                goto found_cc_info;
        }

        /*
         * If the kernel was booted directly, without the use of the
         * boot/decompression kernel, the CC blob may have been passed via
         * setup_data instead.
         */
        cc_info = find_cc_blob_setup_data(bp);
        if (!cc_info)
                return NULL;

found_cc_info:
        if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
                sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

        return cc_info;
}

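/*
 * If the guest is not running at VMPL0, an SVSM is present. Record its
 * calling area and ask it to remap the CA to the kernel's boot SVSM CA
 * page using the MSR protocol.
 */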
static void __init svsm_setup(struct cc_blob_sev_info *cc_info)
{
        struct snp_secrets_page *secrets = (void *)cc_info->secrets_phys;
        struct svsm_call call = {};
        u64 pa;

        /*
         * Record the SVSM Calling Area address (CAA) if the guest is not
         * running at VMPL0. The CA will be used to communicate with the
         * SVSM to perform SVSM services.
         */
        if (!svsm_setup_ca(cc_info, rip_rel_ptr(&boot_svsm_ca_page)))
                return;

        /*
         * It is very early in the boot and the kernel is running identity
         * mapped but without having adjusted the pagetables to where the
         * kernel was loaded (physbase), so get the CA address using
         * RIP-relative addressing.
         */
        pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);

        /*
         * Switch over to the boot SVSM CA while the current CA is still 1:1
         * mapped and thus addressable with VA == PA. There is no GHCB at this
         * point so use the MSR protocol.
         *
         * SVSM_CORE_REMAP_CA call:
         *   RAX = 0 (Protocol=0, CallID=0)
         *   RCX = New CA GPA
         */
        call.caa = (struct svsm_ca *)secrets->svsm_caa;
        call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
        call.rcx = pa;

        if (svsm_call_msr_protocol(&call))
                sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);

        boot_svsm_caa_pa = pa;
}

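/*
 * Probe for SEV-SNP: locate the Confidential Computing blob, record the
 * secrets page address, set up the CPUID table and the SVSM calling area,
 * and cache the blob address in boot_params for later use. Returns false
 * when no usable CC blob is found.
 */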
bool __init snp_init(struct boot_params *bp)
{
        struct cc_blob_sev_info *cc_info;

        if (!bp)
                return false;

        cc_info = find_cc_blob(bp);
        if (!cc_info)
                return false;

        if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
                sev_secrets_pa = cc_info->secrets_phys;
        else
                return false;

        setup_cpuid_table(cc_info);

        svsm_setup(cc_info);

        /*
         * The CC blob will be used later to access the secrets page. Cache
         * it here like the boot kernel does.
         */
        bp->cc_blob_address = (u32)(unsigned long)cc_info;

        return true;
}