// SPDX-License-Identifier: GPL-2.0-only

// Interface to PSP for CCP/SEV-TIO/SNP-VM

#include <linux/pci.h>
#include <linux/tsm.h>
#include <linux/psp.h>
#include <linux/vmalloc.h>
#include <linux/bitfield.h>
#include <linux/pci-doe.h>
#include <asm/sev-common.h>
#include <asm/sev.h>
#include <asm/page.h>
#include "sev-dev.h"
#include "sev-dev-tio.h"

#define to_tio_status(dev_data) \
	(container_of((dev_data), struct tio_dsm, data)->sev->tio_status)

#define SLA_PAGE_TYPE_DATA	0
#define SLA_PAGE_TYPE_SCATTER	1
#define SLA_PAGE_SIZE_4K	0
#define SLA_PAGE_SIZE_2M	1
#define SLA_SZ(s)		((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K)
#define SLA_SCATTER_LEN(s)	(SLA_SZ(s) / sizeof(struct sla_addr_t))
#define SLA_EOL			((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) })
#define SLA_NULL		((struct sla_addr_t) { 0 })
#define IS_SLA_NULL(s)		((s).sla == SLA_NULL.sla)
#define IS_SLA_EOL(s)		((s).sla == SLA_EOL.sla)
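
/*
 * Layout sketch of struct sla_addr_t as the macros above assume it; the
 * actual definition lives in sev-dev-tio.h (not shown here):
 *
 *	struct sla_addr_t {
 *		union {
 *			u64 sla;
 *			struct {
 *				u64 pfn:40;
 *				u64 page_size:1;
 *				u64 page_type:1;
 *			};
 *		};
 *	};
 *
 * Only the 40-bit .pfn width and the .sla aliasing of the whole word are
 * relied upon here (SLA_EOL sets all pfn bits, IS_SLA_NULL()/IS_SLA_EOL()
 * compare raw .sla values); the exact bit positions are an assumption.
 */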

static phys_addr_t sla_to_pa(struct sla_addr_t sla)
{
	u64 pfn = sla.pfn;
	u64 pa = pfn << PAGE_SHIFT;

	return pa;
}

static void *sla_to_va(struct sla_addr_t sla)
{
	void *va = __va(__sme_clr(sla_to_pa(sla)));

	return va;
}

#define sla_to_pfn(sla)		(__pa(sla_to_va(sla)) >> PAGE_SHIFT)
#define sla_to_page(sla)	virt_to_page(sla_to_va(sla))

static struct sla_addr_t make_sla(struct page *pg, bool stp)
{
	u64 pa = __sme_set(page_to_phys(pg));
	struct sla_addr_t ret = {
		.pfn = pa >> PAGE_SHIFT,
		.page_size = SLA_PAGE_SIZE_4K, /* Do not do SLA_PAGE_SIZE_2M ATM */
		.page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA,
	};

	return ret;
}

/* The BUFFER structure */
#define SLA_BUFFER_FLAG_ENCRYPTION	BIT(0)

/*
 * struct sla_buffer_hdr - Scatter list address buffer header
 *
 * @capacity_sz: Total capacity of the buffer in bytes
 * @payload_sz: Size of the buffer payload in bytes, must be a multiple of 32B
 * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted)
 * @iv: Initialization vector used for the encryption of this buffer
 * @authtag: Authentication tag for this buffer
 */
struct sla_buffer_hdr {
	u32 capacity_sz;
	u32 payload_sz;
	u32 flags;
	u8 reserved1[4];
	u8 iv[16];
	u8 authtag[16];
	u8 reserved2[16];
} __packed;
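
/*
 * Resulting in-memory layout of a request/response buffer, as implied by the
 * BUILD_BUG_ON()s and the sla_to_dobj_hdr()/sla_to_data() helpers below:
 *
 *	+0x00  struct sla_buffer_hdr           (0x40 bytes)
 *	+0x40  struct spdm_dobj_hdr_req/_resp  (0x10 bytes)
 *	+0x50  payload                         (spdm.req/spdm.rsp point here)
 */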

enum spdm_data_type_t {
	DOBJ_DATA_TYPE_SPDM = 0x1,
	DOBJ_DATA_TYPE_SECURE_SPDM = 0x2,
};

struct spdm_dobj_hdr_req {
	struct spdm_dobj_hdr hdr;	/* hdr.id == SPDM_DOBJ_ID_REQ */
	u8 data_type;			/* spdm_data_type_t */
	u8 reserved2[5];
} __packed;

struct spdm_dobj_hdr_resp {
	struct spdm_dobj_hdr hdr;	/* hdr.id == SPDM_DOBJ_ID_RESP */
	u8 data_type;			/* spdm_data_type_t */
	u8 reserved2[5];
} __packed;

/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */
struct spdm_dobj_hdr_cert;
struct spdm_dobj_hdr_meas;
struct spdm_dobj_hdr_report;

/* Used in all SPDM-aware TIO commands */
struct spdm_ctrl {
	struct sla_addr_t req;
	struct sla_addr_t resp;
	struct sla_addr_t scratch;
	struct sla_addr_t output;
} __packed;
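
/*
 * All four SLAs are copied from the per-device tsm_dsm_tio state by
 * spdm_ctrl_init(): @req/@resp carry the SPDM request/response that gets
 * bounced through the DOE mailbox, while @scratch and @output are
 * firmware-owned working and result buffers (presumably holding the
 * certificate/measurement/report blobs declared above).
 */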

static size_t sla_dobj_id_to_size(u8 id)
{
	size_t n;

	BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10);
	switch (id) {
	case SPDM_DOBJ_ID_REQ:
		n = sizeof(struct spdm_dobj_hdr_req);
		break;
	case SPDM_DOBJ_ID_RESP:
		n = sizeof(struct spdm_dobj_hdr_resp);
		break;
	default:
		WARN_ON(1);
		n = 0;
		break;
	}

	return n;
}

#define SPDM_DOBJ_HDR_SIZE(hdr)	sla_dobj_id_to_size((hdr)->id)
#define SPDM_DOBJ_DATA(hdr)	((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr))
#define SPDM_DOBJ_LEN(hdr)	((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr))

#define sla_to_dobj_resp_hdr(buf)	((struct spdm_dobj_hdr_resp *) \
					 sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP))
#define sla_to_dobj_req_hdr(buf)	((struct spdm_dobj_hdr_req *) \
					 sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ))

static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf)
{
	if (!buf)
		return NULL;

	return (struct spdm_dobj_hdr *) &buf[1];
}

static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid)
{
	struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);

	if (WARN_ON_ONCE(!hdr))
		return NULL;

	if (hdr->id != check_dobjid) {
		pr_err("Expected DOBJ id %u, found %u\n", check_dobjid, hdr->id);
		return NULL;
	}

	return hdr;
}

static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid)
{
	struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);

	if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP))
		return NULL;

	if (!hdr)
		return NULL;

	return (u8 *) hdr + sla_dobj_id_to_size(dobjid);
}

/*
 * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command
 *
 * @length: Length of this command buffer in bytes
 * @status_paddr: System physical address of the TIO_STATUS structure
 */
struct sev_data_tio_status {
	u32 length;
	u8 reserved[4];
	u64 status_paddr;
} __packed;

/*
 * struct sev_data_tio_init - TIO_INIT command
 *
 * @length: Length of this command buffer in bytes
 */
struct sev_data_tio_init {
	u32 length;
	u8 reserved[12];
} __packed;

/*
 * struct sev_data_tio_dev_create - TIO_DEV_CREATE command
 *
 * @length: Length in bytes of this command buffer
 * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer
 * @device_id: PCIe Routing Identifier of the device to connect to
 * @root_port_id: PCIe Routing Identifier of the root port of the device
 * @segment_id: PCIe Segment Identifier of the device to connect to
 */
struct sev_data_tio_dev_create {
	u32 length;
	u8 reserved1[4];
	struct sla_addr_t dev_ctx_sla;
	u16 device_id;
	u16 root_port_id;
	u8 segment_id;
	u8 reserved2[11];
} __packed;

/*
 * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command
 *
 * @length: Length in bytes of this command buffer
 * @spdm_ctrl: SPDM control structure defined in Section 5.1
 * @dev_ctx_sla: Scatter list address of the device context buffer
 * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage.
 *	Setting the k-th bit of TC_MASK to 1 indicates that traffic class k
 *	will be initialized
 * @cert_slot: Slot number of the certificate requested for constructing the SPDM session
 * @ide_stream_id: IDE stream IDs to be associated with this device.
 *	Valid only if the corresponding bit in TC_MASK is set
 */
struct sev_data_tio_dev_connect {
	u32 length;
	u8 reserved1[4];
	struct spdm_ctrl spdm_ctrl;
	u8 reserved2[8];
	struct sla_addr_t dev_ctx_sla;
	u8 tc_mask;
	u8 cert_slot;
	u8 reserved3[6];
	u8 ide_stream_id[8];
	u8 reserved4[8];
} __packed;

#define TIO_DEV_DISCONNECT_FLAG_FORCE	BIT(0)

/*
 * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command
 *
 * @length: Length in bytes of this command buffer
 * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect)
 * @spdm_ctrl: SPDM control structure defined in Section 5.1
 * @dev_ctx_sla: Scatter list address of the device context buffer
 */
struct sev_data_tio_dev_disconnect {
	u32 length;
	u32 flags;
	struct spdm_ctrl spdm_ctrl;
	struct sla_addr_t dev_ctx_sla;
} __packed;

#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM	BIT(0)

/*
 * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command
 *
 * @length: Length in bytes of this command buffer
 * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements)
 * @spdm_ctrl: SPDM control structure defined in Section 5.1
 * @dev_ctx_sla: Scatter list address of the device context buffer
 * @meas_nonce: Nonce for measurement freshness verification
 */
struct sev_data_tio_dev_meas {
	u32 length;
	u32 flags;
	struct spdm_ctrl spdm_ctrl;
	struct sla_addr_t dev_ctx_sla;
	u8 meas_nonce[32];
} __packed;

/*
 * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command
 *
 * @length: Length in bytes of this command buffer
 * @spdm_ctrl: SPDM control structure defined in Section 5.1
 * @dev_ctx_sla: Scatter list address of the device context buffer
 */
struct sev_data_tio_dev_certs {
	u32 length;
	u8 reserved[4];
	struct spdm_ctrl spdm_ctrl;
	struct sla_addr_t dev_ctx_sla;
} __packed;

/*
 * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command
 *
 * @length: Length in bytes of this command buffer
 * @dev_ctx_sla: Scatter list address of the device context buffer
 *
 * This command reclaims resources associated with a device context.
 */
struct sev_data_tio_dev_reclaim {
	u32 length;
	u8 reserved[4];
	struct sla_addr_t dev_ctx_sla;
} __packed;
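
/*
 * A scatter SLA describes physically discontiguous 4K pages, so
 * sla_buffer_map() below stitches them into one virtually contiguous range
 * with vm_map_ram(); callers can then treat the buffer header and payload
 * as a single linear object.
 */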

static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla)
{
	struct sla_buffer_hdr *buf;

	BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40);
	if (IS_SLA_NULL(sla))
		return NULL;

	if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
		struct sla_addr_t *scatter = sla_to_va(sla);
		unsigned int i, npages = 0;

		for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
			if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K))
				return NULL;

			if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER))
				return NULL;

			if (IS_SLA_EOL(scatter[i])) {
				npages = i;
				break;
			}
		}
		if (WARN_ON_ONCE(!npages))
			return NULL;

		struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL);

		if (!pp)
			return NULL;

		for (i = 0; i < npages; ++i)
			pp[i] = sla_to_page(scatter[i]);

		buf = vm_map_ram(pp, npages, 0);
		kfree(pp);
	} else {
		struct page *pg = sla_to_page(sla);

		buf = vm_map_ram(&pg, 1, 0);
	}

	return buf;
}

static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf)
{
	if (!buf)
		return;

	if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
		struct sla_addr_t *scatter = sla_to_va(sla);
		unsigned int i, npages = 0;

		for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
			if (IS_SLA_EOL(scatter[i])) {
				npages = i;
				break;
			}
		}
		if (!npages)
			return;

		vm_unmap_ram(buf, npages);
	} else {
		vm_unmap_ram(buf, 1);
	}
}

static void dobj_response_init(struct sla_buffer_hdr *buf)
{
	struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf);

	dobj->id = SPDM_DOBJ_ID_RESP;
	dobj->version.major = 0x1;
	dobj->version.minor = 0;
	dobj->length = 0;
	buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length;
}

static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state)
{
	unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	struct sla_addr_t *scatter = NULL;
	int ret = 0, i;

	if (IS_SLA_NULL(sla))
		return;

	if (firmware_state) {
		if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
			scatter = sla_to_va(sla);

			for (i = 0; i < npages; ++i) {
				if (IS_SLA_EOL(scatter[i]))
					break;

				ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false);
				if (ret)
					break;
			}
		} else {
			ret = snp_reclaim_pages(sla_to_pa(sla), 1, false);
		}
	}

	if (WARN_ON(ret))
		return;

	if (scatter) {
		for (i = 0; i < npages; ++i) {
			if (IS_SLA_EOL(scatter[i]))
				break;
			free_page((unsigned long)sla_to_va(scatter[i]));
		}
	}

	free_page((unsigned long)sla_to_va(sla));
}

static struct sla_addr_t sla_alloc(size_t len, bool firmware_state)
{
	unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	struct sla_addr_t *scatter = NULL;
	struct sla_addr_t ret = SLA_NULL;
	struct sla_buffer_hdr *buf;
	struct page *pg;

	if (npages == 0)
		return ret;

	/* The scatter page must fit npages entries plus the SLA_EOL terminator */
	if (WARN_ON_ONCE(npages > (PAGE_SIZE / sizeof(struct sla_addr_t)) - 1))
		return ret;

	BUILD_BUG_ON(PAGE_SIZE < SZ_4K);

	if (npages > 1) {
		pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!pg)
			return SLA_NULL;

		ret = make_sla(pg, true);
		scatter = page_to_virt(pg);
		for (i = 0; i < npages; ++i) {
			pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!pg)
				goto no_reclaim_exit;

			scatter[i] = make_sla(pg, false);
		}
		scatter[i] = SLA_EOL;
	} else {
		pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!pg)
			return SLA_NULL;

		ret = make_sla(pg, false);
	}

	buf = sla_buffer_map(ret);
	if (!buf)
		goto no_reclaim_exit;

	buf->capacity_sz = (npages << PAGE_SHIFT);
	sla_buffer_unmap(ret, buf);

	if (firmware_state) {
		if (scatter) {
			for (i = 0; i < npages; ++i) {
				if (rmp_make_private(sla_to_pfn(scatter[i]), 0,
						     PG_LEVEL_4K, 0, true))
					goto free_exit;
			}
		} else {
			if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true))
				goto no_reclaim_exit;
		}
	}

	return ret;

no_reclaim_exit:
	firmware_state = false;
free_exit:
	sla_free(ret, len, firmware_state);
	return SLA_NULL;
}
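
/*
 * Illustrative round trip with the helpers above (a sketch, not code called
 * from this file):
 *
 *	struct sla_addr_t sla = sla_alloc(SZ_16K, true);
 *	struct sla_buffer_hdr *buf;
 *
 *	if (IS_SLA_NULL(sla))
 *		return -ENOMEM;
 *	buf = sla_buffer_map(sla);	// linear CPU view of the pages
 *	...				// hand @sla to the PSP, read @buf back
 *	sla_buffer_unmap(sla, buf);
 *	sla_free(sla, SZ_16K, true);	// reclaims pages from firmware first
 */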

/* Expand a buffer; only firmware-owned buffers are allowed for now */
static int sla_expand(struct sla_addr_t *sla, size_t *len)
{
	struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf;
	struct sla_addr_t oldsla = *sla, newsla;
	size_t oldlen = *len, newlen;

	if (!oldbuf)
		return -EFAULT;

	newlen = oldbuf->capacity_sz;
	if (oldbuf->capacity_sz == oldlen) {
		/* This buffer does not require expansion, must be another buffer */
		sla_buffer_unmap(oldsla, oldbuf);
		return 1;
	}

	pr_notice("Expanding BUFFER from %zu to %zu bytes\n", oldlen, newlen);

	newsla = sla_alloc(newlen, true);
	if (IS_SLA_NULL(newsla)) {
		sla_buffer_unmap(oldsla, oldbuf);
		return -ENOMEM;
	}

	newbuf = sla_buffer_map(newsla);
	if (!newbuf) {
		sla_free(newsla, newlen, true);
		sla_buffer_unmap(oldsla, oldbuf);
		return -EFAULT;
	}

	memcpy(newbuf, oldbuf, oldlen);

	sla_buffer_unmap(newsla, newbuf);
	/* Unmap the old buffer before freeing its pages */
	sla_buffer_unmap(oldsla, oldbuf);
	sla_free(oldsla, oldlen, true);
	*sla = newsla;
	*len = newlen;

	return 0;
}

static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret,
			  struct tsm_dsm_tio *dev_data)
{
	int rc;

	*psp_ret = 0;
	rc = sev_do_cmd(cmd, data, psp_ret);

	if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST))
		return -EIO;

	if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) {
		int rc1, rc2;

		rc1 = sla_expand(&dev_data->output, &dev_data->output_len);
		if (rc1 < 0)
			return rc1;

		rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len);
		if (rc2 < 0)
			return rc2;

		if (!rc1 && !rc2)
			/* Neither buffer requires expansion, this is wrong */
			return -EFAULT;

		*psp_ret = 0;
		rc = sev_do_cmd(cmd, data, psp_ret);
	}

	if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) {
		struct spdm_dobj_hdr_resp *resp_hdr;
		struct spdm_dobj_hdr_req *req_hdr;
		struct sev_tio_status *tio_status = to_tio_status(dev_data);
		size_t resp_len = tio_status->spdm_req_size_max -
			(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr));

		if (!dev_data->cmd) {
			if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data)))
				return -EINVAL;
			if (WARN_ON(data_len > sizeof(dev_data->cmd_data)))
				return -EFAULT;
			memcpy(dev_data->cmd_data, data, data_len);
			memset(&dev_data->cmd_data[data_len], 0xFF,
			       sizeof(dev_data->cmd_data) - data_len);
			dev_data->cmd = cmd;
		}

		req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf);
		resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
		switch (req_hdr->data_type) {
		case DOBJ_DATA_TYPE_SPDM:
			rc = PCI_DOE_FEATURE_CMA;
			break;
		case DOBJ_DATA_TYPE_SECURE_SPDM:
			rc = PCI_DOE_FEATURE_SSESSION;
			break;
		default:
			return -EINVAL;
		}
		resp_hdr->data_type = req_hdr->data_type;
		dev_data->spdm.req_len = req_hdr->hdr.length -
			sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ);
		dev_data->spdm.rsp_len = resp_len;
	} else if (dev_data && dev_data->cmd) {
		/* For either error or success just stop the bouncing */
		memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data));
		dev_data->cmd = 0;
	}

	return rc;
}
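
/*
 * sev_tio_do_cmd() and sev_tio_continue() implement the SPDM "bounce" loop:
 * when firmware answers SEV_RET_SPDM_REQUEST, the command is parked in
 * @dev_data->cmd/cmd_data and the positive PCI_DOE_FEATURE_* return value
 * tells the caller to forward spdm.req to the device's DOE mailbox; once
 * spdm.rsp is filled in, sev_tio_continue() below reissues the parked
 * command until the firmware stops asking.
 */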

int sev_tio_continue(struct tsm_dsm_tio *dev_data)
{
	struct spdm_dobj_hdr_resp *resp_hdr;
	int ret;

	if (!dev_data || !dev_data->cmd)
		return -EINVAL;

	resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
	resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
				     dev_data->spdm.rsp_len, 32);
	dev_data->respbuf->payload_sz = resp_hdr->hdr.length;

	ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0,
			     &dev_data->psp_ret, dev_data);
	if (ret)
		return ret;

	if (dev_data->psp_ret != SEV_RET_SUCCESS)
		return -EINVAL;

	return 0;
}

static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data)
{
	ctrl->req = dev_data->req;
	ctrl->resp = dev_data->resp;
	ctrl->scratch = dev_data->scratch;
	ctrl->output = dev_data->output;
}

static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data)
{
	struct sev_tio_status *tio_status = to_tio_status(dev_data);
	size_t len = tio_status->spdm_req_size_max -
		(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
		 sizeof(struct sla_buffer_hdr));
	struct tsm_spdm *spdm = &dev_data->spdm;

	sla_buffer_unmap(dev_data->resp, dev_data->respbuf);
	sla_buffer_unmap(dev_data->req, dev_data->reqbuf);
	spdm->rsp = NULL;
	spdm->req = NULL;
	sla_free(dev_data->req, len, true);
	sla_free(dev_data->resp, len, false);
	sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true);

	dev_data->req.sla = 0;
	dev_data->resp.sla = 0;
	dev_data->scratch.sla = 0;
	dev_data->respbuf = NULL;
	dev_data->reqbuf = NULL;
	sla_free(dev_data->output, tio_status->spdm_out_size_max, true);
	dev_data->output.sla = 0;
}

static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data)
{
	struct sev_tio_status *tio_status = to_tio_status(dev_data);
	struct tsm_spdm *spdm = &dev_data->spdm;
	int ret;

	dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true);
	dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false);
	dev_data->scratch_len = tio_status->spdm_scratch_size_max;
	dev_data->scratch = sla_alloc(dev_data->scratch_len, true);
	dev_data->output_len = tio_status->spdm_out_size_max;
	dev_data->output = sla_alloc(dev_data->output_len, true);

	if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) ||
	    IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->output)) {
		ret = -ENOMEM;
		goto free_spdm_exit;
	}

	dev_data->reqbuf = sla_buffer_map(dev_data->req);
	dev_data->respbuf = sla_buffer_map(dev_data->resp);
	if (!dev_data->reqbuf || !dev_data->respbuf) {
		ret = -EFAULT;
		goto free_spdm_exit;
	}

	spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ);
	spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP);
	if (!spdm->req || !spdm->rsp) {
		ret = -EFAULT;
		goto free_spdm_exit;
	}

	dobj_response_init(dev_data->respbuf);

	return 0;

free_spdm_exit:
	spdm_ctrl_free(dev_data);
	return ret;
}

int sev_tio_init_locked(void *tio_status_page)
{
	struct sev_tio_status *tio_status = tio_status_page;
	struct sev_data_tio_status data_status = {
		.length = sizeof(data_status),
	};
	int ret, psp_ret;

	data_status.status_paddr = __psp_pa(tio_status_page);
	ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
	if (ret)
		return ret;

	if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) ||
	    tio_status->reserved)
		return -EFAULT;

	if (!tio_status->tio_en && !tio_status->tio_init_done)
		return -ENOENT;

	if (tio_status->tio_init_done)
		return -EBUSY;

	struct sev_data_tio_init ti = { .length = sizeof(ti) };

	ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret);
	if (ret)
		return ret;

	ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
	if (ret)
		return ret;

	return 0;
}
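
/*
 * Device lifecycle, as implied by the helpers below: sev_tio_dev_create()
 * allocates the device context, sev_tio_dev_connect() brings up the SPDM
 * session (bouncing through sev_tio_continue() as needed),
 * sev_tio_dev_disconnect() tears the session down, and sev_tio_dev_reclaim()
 * returns the context pages to the host.
 */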

int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id,
		       u16 root_port_id, u8 segment_id)
{
	struct sev_tio_status *tio_status = to_tio_status(dev_data);
	struct sev_data_tio_dev_create create = {
		.length = sizeof(create),
		.device_id = device_id,
		.root_port_id = root_port_id,
		.segment_id = segment_id,
	};
	void *data_pg;
	int ret;

	dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true);
	if (IS_SLA_NULL(dev_data->dev_ctx))
		return -ENOMEM;

	data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
	if (!data_pg) {
		ret = -ENOMEM;
		goto free_ctx_exit;
	}

	create.dev_ctx_sla = dev_data->dev_ctx;
	ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret);
	if (ret)
		goto free_data_pg_exit;

	dev_data->data_pg = data_pg;

	return 0;

free_data_pg_exit:
	snp_free_firmware_page(data_pg);
free_ctx_exit:
	sla_free(create.dev_ctx_sla, tio_status->devctx_size, true);
	return ret;
}

int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data)
{
	struct sev_tio_status *tio_status = to_tio_status(dev_data);
	struct sev_data_tio_dev_reclaim r = {
		.length = sizeof(r),
		.dev_ctx_sla = dev_data->dev_ctx,
	};
	int ret;

	if (dev_data->data_pg) {
		snp_free_firmware_page(dev_data->data_pg);
		dev_data->data_pg = NULL;
	}

	if (IS_SLA_NULL(dev_data->dev_ctx))
		return 0;

	ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret);

	sla_free(dev_data->dev_ctx, tio_status->devctx_size, true);
	dev_data->dev_ctx = SLA_NULL;

	spdm_ctrl_free(dev_data);

	return ret;
}

int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot)
{
	struct sev_data_tio_dev_connect connect = {
		.length = sizeof(connect),
		.tc_mask = tc_mask,
		.cert_slot = cert_slot,
		.dev_ctx_sla = dev_data->dev_ctx,
		.ide_stream_id = {
			ids[0], ids[1], ids[2], ids[3],
			ids[4], ids[5], ids[6], ids[7]
		},
	};
	int ret;

	if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx)))
		return -EFAULT;
	if (!(tc_mask & 1))
		return -EINVAL;

	ret = spdm_ctrl_alloc(dev_data);
	if (ret)
		return ret;

	spdm_ctrl_init(&connect.spdm_ctrl, dev_data);

	return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect),
			      &dev_data->psp_ret, dev_data);
}

int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force)
{
	struct sev_data_tio_dev_disconnect dc = {
		.length = sizeof(dc),
		.dev_ctx_sla = dev_data->dev_ctx,
		.flags = force ? TIO_DEV_DISCONNECT_FLAG_FORCE : 0,
	};

	if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx)))
		return -EFAULT;

	spdm_ctrl_init(&dc.spdm_ctrl, dev_data);

	return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc),
			      &dev_data->psp_ret, dev_data);
}

int sev_tio_cmd_buffer_len(int cmd)
{
	switch (cmd) {
	case SEV_CMD_TIO_STATUS:		return sizeof(struct sev_data_tio_status);
	case SEV_CMD_TIO_INIT:			return sizeof(struct sev_data_tio_init);
	case SEV_CMD_TIO_DEV_CREATE:		return sizeof(struct sev_data_tio_dev_create);
	case SEV_CMD_TIO_DEV_RECLAIM:		return sizeof(struct sev_data_tio_dev_reclaim);
	case SEV_CMD_TIO_DEV_CONNECT:		return sizeof(struct sev_data_tio_dev_connect);
	case SEV_CMD_TIO_DEV_DISCONNECT:	return sizeof(struct sev_data_tio_dev_disconnect);
	default:				return 0;
	}
}