GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/coco/sev/svsm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SVSM support code
 */

#include <linux/types.h>

#include <asm/sev.h>

#include "internal.h"

/* For early boot SVSM communication */
struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
SYM_PIC_ALIAS(boot_svsm_ca_page);

/*
 * SVSM related information:
 *   During boot, the page tables are set up as identity mapped and later
 *   changed to use kernel virtual addresses. Maintain separate virtual and
 *   physical addresses for the CAA to allow SVSM functions to be used during
 *   early boot, both with identity mapped virtual addresses and proper kernel
 *   virtual addresses.
 */
u64 boot_svsm_caa_pa __ro_after_init;
SYM_PIC_ALIAS(boot_svsm_caa_pa);

DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
DEFINE_PER_CPU(u64, svsm_caa_pa);

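/*
 * Issue an SVSM call using the GHCB-based protocol: set up the GHCB for an
 * SVM_VMGEXIT_SNP_RUN_VMPL exit, issue the call, then verify the exit
 * information before collecting the SVSM result codes.
 */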
static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
{
        struct es_em_ctxt ctxt;
        u8 pending = 0;

        vc_ghcb_invalidate(ghcb);

        /*
         * Fill in protocol and format specifiers. This can be called very early
         * in the boot, so use rip-relative references as needed.
         */
        ghcb->protocol_version = ghcb_version;
        ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;

        ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
        ghcb_set_sw_exit_info_1(ghcb, 0);
        ghcb_set_sw_exit_info_2(ghcb, 0);

        sev_es_wr_ghcb_msr(__pa(ghcb));

        svsm_issue_call(call, &pending);

        if (pending)
                return -EINVAL;

        switch (verify_exception_info(ghcb, &ctxt)) {
        case ES_OK:
                break;
        case ES_EXCEPTION:
                vc_forward_exception(&ctxt);
                fallthrough;
        default:
                return -EINVAL;
        }

        return svsm_process_result_codes(call);
}

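/*
 * Issue an SVSM call, preferring the GHCB-based protocol when a GHCB is
 * available and falling back to the MSR-based protocol otherwise. The call
 * is retried for as long as it returns -EAGAIN.
 */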
int svsm_perform_call_protocol(struct svsm_call *call)
{
        struct ghcb_state state;
        unsigned long flags;
        struct ghcb *ghcb;
        int ret;

        flags = native_local_irq_save();

        if (sev_cfg.ghcbs_initialized)
                ghcb = __sev_get_ghcb(&state);
        else if (boot_ghcb)
                ghcb = boot_ghcb;
        else
                ghcb = NULL;

        do {
                ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
                           : __pi_svsm_perform_msr_protocol(call);
        } while (ret == -EAGAIN);

        if (sev_cfg.ghcbs_initialized)
                __sev_put_ghcb(&state);

        native_local_irq_restore(flags);

        return ret;
}

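/*
 * Fill the calling area (CA) buffer with 4K PVALIDATE entries covering the
 * PFN range [pfn, pfn_end), stopping early if the CA becomes full. Returns
 * the first PFN that was not added.
 */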
static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
                                        struct svsm_pvalidate_call *pc)
{
        struct svsm_pvalidate_entry *pe;

        /* Nothing in the CA yet */
        pc->num_entries = 0;
        pc->cur_index = 0;

        pe = &pc->entry[0];

        while (pfn < pfn_end) {
                pe->page_size = RMP_PG_SIZE_4K;
                pe->action = action;
                pe->ignore_cf = 0;
                pe->rsvd = 0;
                pe->pfn = pfn;

                pe++;
                pfn++;

                pc->num_entries++;
                if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
                        break;
        }

        return pfn;
}

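/*
 * Fill the calling area (CA) buffer from a Page State Change descriptor,
 * starting at entry @desc_entry, stopping early if the CA becomes full.
 * Returns the index of the first descriptor entry that was not added.
 */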
static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
                                       struct svsm_pvalidate_call *pc)
{
        struct svsm_pvalidate_entry *pe;
        struct psc_entry *e;

        /* Nothing in the CA yet */
        pc->num_entries = 0;
        pc->cur_index = 0;

        pe = &pc->entry[0];
        e = &desc->entries[desc_entry];

        while (desc_entry <= desc->hdr.end_entry) {
                pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
                pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
                pe->ignore_cf = 0;
                pe->rsvd = 0;
                pe->pfn = e->gfn;

                pe++;
                e++;

                desc_entry++;
                pc->num_entries++;
                if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
                        break;
        }

        return desc_entry;
}

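/*
 * Report the PVALIDATE failure for the calling area entry being processed
 * when the SVSM call failed, then terminate the guest.
 */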
static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
{
        unsigned int page_size;
        bool action;
        u64 pfn;

        pfn = pc->entry[pc->cur_index].pfn;
        action = pc->entry[pc->cur_index].action;
        page_size = pc->entry[pc->cur_index].page_size;

        __pval_terminate(pfn, action, page_size, ret, svsm_ret);
}

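/*
 * Validate the pages described by a Page State Change descriptor using the
 * SVSM PVALIDATE core call. Entries that fail at 2M granularity because of
 * an RMP page size mismatch are retried at 4K granularity.
 */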
void svsm_pval_pages(struct snp_psc_desc *desc)
{
        struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
        unsigned int i, pv_4k_count = 0;
        struct svsm_pvalidate_call *pc;
        struct svsm_call call = {};
        unsigned long flags;
        bool action;
        u64 pc_pa;
        int ret;

        /*
         * This can be called very early in boot, so use native functions in
         * order to avoid paravirt issues.
         */
        flags = native_local_irq_save();

        /*
         * The SVSM calling area (CA) can support processing 510 entries at a
         * time. Loop through the Page State Change descriptor until the CA is
         * full or the last entry in the descriptor is reached, at which time
         * the SVSM is invoked. This repeats until all entries in the descriptor
         * are processed.
         */
        call.caa = svsm_get_caa();

        pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
        pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);

        /* Protocol 0, Call ID 1 */
        call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
        call.rcx = pc_pa;

        for (i = 0; i <= desc->hdr.end_entry;) {
                i = svsm_build_ca_from_psc_desc(desc, i, pc);

                do {
                        ret = svsm_perform_call_protocol(&call);
                        if (!ret)
                                continue;

                        /*
                         * Check if the entry failed because of an RMP mismatch (a
                         * PVALIDATE at 2M was requested, but the page is mapped in
                         * the RMP as 4K).
                         */

                        if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
                            pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
                                /* Save this entry for post-processing at 4K */
                                pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];

                                /* Skip to the next one unless at the end of the list */
                                pc->cur_index++;
                                if (pc->cur_index < pc->num_entries)
                                        ret = -EAGAIN;
                                else
                                        ret = 0;
                        }
                } while (ret == -EAGAIN);

                if (ret)
                        svsm_pval_terminate(pc, ret, call.rax_out);
        }

        /* Process any entries that failed to be validated at 2M and validate them at 4K */
        for (i = 0; i < pv_4k_count; i++) {
                u64 pfn, pfn_end;

                action = pv_4k[i].action;
                pfn = pv_4k[i].pfn;
                pfn_end = pfn + 512;    /* a 2M entry covers 512 4K pages */

                while (pfn < pfn_end) {
                        pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);

                        ret = svsm_perform_call_protocol(&call);
                        if (ret)
                                svsm_pval_terminate(pc, ret, call.rax_out);
                }
        }

        native_local_irq_restore(flags);
}

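/*
 * Propagate any (new) buffer lengths returned by the SVSM attestation call
 * back into the caller-provided svsm_attest_call structure.
 */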
static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
{
        /* If (new) lengths have been returned, propagate them up */
        if (call->rcx_out != call->rcx)
                input->manifest_buf.len = call->rcx_out;

        if (call->rdx_out != call->rdx)
                input->certificates_buf.len = call->rdx_out;

        if (call->r8_out != call->r8)
                input->report_buf.len = call->r8_out;
}

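/*
 * Issue an SVSM attestation request: copy the caller's request into the
 * calling area buffer, perform the call, then propagate any returned buffer
 * lengths back to the caller.
 */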
int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
                              struct svsm_attest_call *input)
{
        struct svsm_attest_call *ac;
        unsigned long flags;
        u64 attest_call_pa;
        int ret;

        if (!snp_vmpl)
                return -EINVAL;

        local_irq_save(flags);

        call->caa = svsm_get_caa();

        ac = (struct svsm_attest_call *)call->caa->svsm_buffer;
        attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);

        *ac = *input;

        /*
         * Set input registers for the request and set RDX and R8 to known
         * values in order to detect length values being returned in them.
         */
        call->rax = call_id;
        call->rcx = attest_call_pa;
        call->rdx = -1;
        call->r8 = -1;
        ret = svsm_perform_call_protocol(call);
        update_attest_input(call, input);

        local_irq_restore(flags);

        return ret;
}
EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);

/**
 * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM
 * @buffer: A buffer used to both send the command and receive the response.
 *
 * Execute an SVSM_VTPM_CMD call as defined by
 * "Secure VM Service Module for SEV-SNP Guests", Publication #58019, Revision 1.00.
 *
 * All command request/response buffers have a common structure as specified
 * by the following table:
 *
 *   Byte      Size       In/Out    Description
 *   Offset    (Bytes)
 *
 *   0x000     4          In        Platform command
 *                        Out       Platform command response size
 *
 * Each command can build upon this common request/response structure to
 * create a structure specific to the command. See include/linux/tpm_svsm.h
 * for more details.
 *
 * Return: 0 on success, -errno on failure
 */
int snp_svsm_vtpm_send_command(u8 *buffer)
{
        struct svsm_call call = {};

        call.caa = svsm_get_caa();
        call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD);
        call.rcx = __pa(buffer);

        return svsm_perform_call_protocol(&call);
}
EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command);

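/*
 * Illustrative sketch (hypothetical caller, assuming TPM_SEND_COMMAND,
 * platform command 8, as the operation): per the table above, the caller
 * places the platform command in the first four bytes of the buffer and
 * reads the response size back from the same location:
 *
 *	*(u32 *)buffer = 8;	// In: platform command (TPM_SEND_COMMAND)
 *	// ... command-specific request fields follow, see include/linux/tpm_svsm.h
 *	ret = snp_svsm_vtpm_send_command(buffer);
 *	// On success, the first four bytes now hold the response size
 */
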
/**
 * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device
 *
 * Check that an SVSM is present and that it supports at least
 * TPM_SEND_COMMAND, which is the only request used so far.
 *
 * Return: true if the platform provides a vTPM SVSM device, false otherwise.
 */
bool snp_svsm_vtpm_probe(void)
{
        struct svsm_call call = {};

        /* The vTPM device is available only if an SVSM is present */
        if (!snp_vmpl)
                return false;

        call.caa = svsm_get_caa();
        call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY);

        if (svsm_perform_call_protocol(&call))
                return false;

        /* Check that the supported platform commands include TPM_SEND_COMMAND (platform command 8) */
        return call.rcx_out & BIT_ULL(8);
}

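/*
 * Illustrative usage sketch (hypothetical driver code, not an API contract):
 * the two vTPM helpers above would typically be paired, probing once and
 * then exchanging commands over a shared buffer:
 *
 *	if (!snp_svsm_vtpm_probe())
 *		return -ENODEV;
 *
 *	// buffer laid out per the SVSM_VTPM_CMD structure described above
 *	ret = snp_svsm_vtpm_send_command(buffer);
 */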