GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/ccp/sev-dev-tsm.c
// SPDX-License-Identifier: GPL-2.0-only

// Interface to CCP/SEV-TIO for generic PCIe TDISP module

#include <linux/pci.h>
#include <linux/device.h>
#include <linux/tsm.h>
#include <linux/iommu.h>
#include <linux/pci-doe.h>
#include <linux/bitfield.h>
#include <linux/module.h>

#include <asm/sev-common.h>
#include <asm/sev.h>

#include "psp-dev.h"
#include "sev-dev.h"
#include "sev-dev-tio.h"

MODULE_IMPORT_NS("PCI_IDE");

#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent)

#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm))

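/*
 * Drive the SPDM exchange requested by the PSP: while the previous command
 * returns a DOE feature number (CMA or secure session), forward the request
 * buffer through the device's DOE mailbox and hand the response back to the
 * firmware via sev_tio_continue().
 */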
static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret)
{
	struct tsm_dsm_tio *dev_data = &dsm->data;
	struct tsm_spdm *spdm = &dev_data->spdm;

	/* Check the main command handler response before entering the loop */
	if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS)
		return -EINVAL;

	if (ret <= 0)
		return ret;

	/* ret > 0 means "SPDM requested" */
	while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
		ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
			      spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
		if (ret < 0)
			break;

		WARN_ON_ONCE(ret == 0); /* The response should never be empty */
		spdm->rsp_len = ret;
		ret = sev_tio_continue(dev_data);
	}

	return ret;
}

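/*
 * Enable a single IDE stream on both ends of the link: the root port first,
 * then the endpoint; roll back the root port if the endpoint fails.
 */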
static int stream_enable(struct pci_ide *ide)
{
	struct pci_dev *rp = pcie_find_root_port(ide->pdev);
	int ret;

	ret = pci_ide_stream_enable(rp, ide);
	if (ret)
		return ret;

	ret = pci_ide_stream_enable(ide->pdev, ide);
	if (ret)
		pci_ide_stream_disable(rp, ide);

	return ret;
}

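/* Enable every allocated stream; stop at the first failure */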
static int streams_enable(struct pci_ide **ide)
{
	int ret = 0;

	for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
		if (ide[i]) {
			ret = stream_enable(ide[i]);
			if (ret)
				break;
		}
	}

	return ret;
}

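/*
 * Disable a single stream on the endpoint and its root port;
 * streams_disable() walks all allocated traffic classes.
 */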
static void stream_disable(struct pci_ide *ide)
{
	pci_ide_stream_disable(ide->pdev, ide);
	pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
}

static void streams_disable(struct pci_ide **ide)
{
	for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
		if (ide[i])
			stream_disable(ide[i]);
}

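/*
 * Program one stream on both partners: accept the full RID range on either
 * side, force the CFG/TEE-limit settings (see the pci_warn() below) and
 * write the stream configuration to the endpoint and the root port.
 */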
static void stream_setup(struct pci_ide *ide)
{
	struct pci_dev *rp = pcie_find_root_port(ide->pdev);

	ide->partner[PCI_IDE_EP].rid_start = 0;
	ide->partner[PCI_IDE_EP].rid_end = 0xffff;
	ide->partner[PCI_IDE_RP].rid_start = 0;
	ide->partner[PCI_IDE_RP].rid_end = 0xffff;

	ide->pdev->ide_cfg = 0;
	ide->pdev->ide_tee_limit = 1;
	rp->ide_cfg = 1;
	rp->ide_tee_limit = 0;

	pci_warn(ide->pdev, "Forcing CFG/TEE for %s", pci_name(rp));
	pci_ide_stream_setup(ide->pdev, ide);
	pci_ide_stream_setup(rp, ide);
}

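/*
 * Set up all allocated streams, mark the first one as the default stream,
 * collect stream IDs into @ids and return the mask of traffic classes in use.
 */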
static u8 streams_setup(struct pci_ide **ide, u8 *ids)
{
	bool def = false;
	u8 tc_mask = 0;
	int i;

	for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
		if (!ide[i]) {
			ids[i] = 0xFF;
			continue;
		}

		tc_mask |= BIT(i);
		ids[i] = ide[i]->stream_id;

		if (!def) {
			struct pci_ide_partner *settings;

			settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
			settings->default_stream = 1;
			def = true;
		}

		stream_setup(ide[i]);
	}

	return tc_mask;
}

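/*
 * Register every allocated stream (and unregister them again in the
 * counterpart below); stop at the first registration failure.
 */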
static int streams_register(struct pci_ide **ide)
{
	int ret = 0, i;

	for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
		if (ide[i]) {
			ret = pci_ide_stream_register(ide[i]);
			if (ret)
				break;
		}
	}

	return ret;
}

static void streams_unregister(struct pci_ide **ide)
{
	for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
		if (ide[i])
			pci_ide_stream_unregister(ide[i]);
}

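/*
 * Undo stream_setup() on both partners; streams_teardown() additionally
 * frees the stream objects and clears the per-traffic-class pointers.
 */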
static void stream_teardown(struct pci_ide *ide)
{
	pci_ide_stream_teardown(ide->pdev, ide);
	pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
}

static void streams_teardown(struct pci_ide **ide)
{
	for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
		if (ide[i]) {
			stream_teardown(ide[i]);
			pci_ide_stream_free(ide[i]);
			ide[i] = NULL;
		}
	}
}

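/*
 * Allocate an IDE stream for traffic class @tc and reuse the host bridge
 * stream ID for it.
 */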
static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
			unsigned int tc)
{
	struct pci_ide *ide1;

	if (ide[tc]) {
		pci_err(pdev, "Stream for class=%d already registered", tc);
		return -EBUSY;
	}

	ide1 = pci_ide_stream_alloc(pdev);
	if (!ide1)
		return -EFAULT;

	ide1->stream_id = ide1->host_bridge_stream;

	ide[tc] = ide1;

	return 0;
}

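/*
 * Allocate and initialize the DSM context for physical function 0;
 * ownership passes to the caller on success (no_free_ptr()).
 */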
static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
{
	struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
	int rc;

	if (!dsm)
		return NULL;

	rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
	if (rc)
		return NULL;

	pci_dbg(pdev, "TSM enabled\n");
	dsm->sev = sev;
	return &no_free_ptr(dsm)->tsm.base_tsm;
}

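/* TSM probe hook: only PF0 of a device gets a tio_dsm context */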
static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
{
	struct sev_device *sev = tsm_dev_to_sev(tsmdev);

	if (is_pci_tsm_pf0(pdev))
		return tio_pf0_probe(pdev, sev);
	return NULL;
}

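/* TSM remove hook, the counterpart of dsm_probe()/tio_pf0_probe() */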
static void dsm_remove(struct pci_tsm *tsm)
{
	struct pci_dev *pdev = tsm->pdev;

	pci_dbg(pdev, "TSM disabled\n");

	if (is_pci_tsm_pf0(pdev)) {
		struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);

		pci_tsm_pf0_destructor(&dsm->tsm);
		kfree(dsm);
	}
}

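/*
 * Describe the device to the PSP: its requester ID, the root port number
 * taken from the Link Capabilities register, and the PCI segment (domain).
 */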
static int dsm_create(struct tio_dsm *dsm)
{
	struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
	u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
	struct pci_dev *rootport = pcie_find_root_port(pdev);
	u16 device_id = pci_dev_id(pdev);
	u16 root_port_id;
	u32 lnkcap = 0;

	if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return -ENODEV;

	root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);

	return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
}

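/*
 * Connect flow: verify the CMA DOE mailbox also supports secure sessions,
 * allocate an IDE stream for traffic class 0, create the device context on
 * the PSP, program the streams, run the SPDM exchange required by
 * sev_tio_dev_connect() and finally enable and register the streams.
 */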
static int dsm_connect(struct pci_dev *pdev)
{
	struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
	struct tsm_dsm_tio *dev_data = &dsm->data;
	u8 ids[TIO_IDE_MAX_TC];
	u8 tc_mask;
	int ret;

	if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
				 PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
		pci_err(pdev, "CMA DOE MB must support SSESSION\n");
		return -EFAULT;
	}

	ret = stream_alloc(pdev, dev_data->ide, 0);
	if (ret)
		return ret;

	ret = dsm_create(dsm);
	if (ret)
		goto ide_free_exit;

	tc_mask = streams_setup(dev_data->ide, ids);

	ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
	ret = sev_tio_spdm_cmd(dsm, ret);
	if (ret)
		goto free_exit;

	streams_enable(dev_data->ide);

	ret = streams_register(dev_data->ide);
	if (ret)
		goto free_exit;

	return 0;

free_exit:
	sev_tio_dev_reclaim(dev_data);

	streams_disable(dev_data->ide);
ide_free_exit:

	streams_teardown(dev_data->ide);

	return ret;
}

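/*
 * Disconnect flow: ask the PSP to disconnect the device (forcing it during
 * shutdown/reboot, or retrying with force if a graceful disconnect fails),
 * then reclaim the device context and unwind the IDE streams.
 */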
static void dsm_disconnect(struct pci_dev *pdev)
{
	bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
	struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
	struct tsm_dsm_tio *dev_data = &dsm->data;
	int ret;

	ret = sev_tio_dev_disconnect(dev_data, force);
	ret = sev_tio_spdm_cmd(dsm, ret);
	if (ret && !force) {
		ret = sev_tio_dev_disconnect(dev_data, true);
		sev_tio_spdm_cmd(dsm, ret);
	}

	sev_tio_dev_reclaim(dev_data);

	streams_disable(dev_data->ide);
	streams_unregister(dev_data->ide);
	streams_teardown(dev_data->ide);
}

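/* Operations passed to tsm_register() below */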
static struct pci_tsm_ops sev_tsm_ops = {
	.probe = dsm_probe,
	.remove = dsm_remove,
	.connect = dsm_connect,
	.disconnect = dsm_disconnect,
};

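/*
 * Query the SEV-TIO status from the firmware, register the TSM device and
 * cache a copy of the status page in sev->tio_status.
 */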
void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
{
	struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
	struct tsm_dev *tsmdev;
	int ret;

	WARN_ON(sev->tio_status);

	if (!t)
		return;

	ret = sev_tio_init_locked(tio_status_page);
	if (ret) {
		pr_warn("SEV-TIO STATUS failed with %d\n", ret);
		goto error_exit;
	}

	tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
	if (IS_ERR(tsmdev))
		goto error_exit;

	memcpy(t, tio_status_page, sizeof(*t));

	pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
		  "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
		  t->tio_en, t->tio_init_done,
		  t->spdm_req_size_min, t->spdm_req_size_max,
		  t->spdm_rsp_size_min, t->spdm_rsp_size_max,
		  t->spdm_scratch_size_min, t->spdm_scratch_size_max,
		  t->spdm_out_size_min, t->spdm_out_size_max,
		  t->devctx_size, t->tdictx_size,
		  t->tio_crypto_alg);

	sev->tsmdev = tsmdev;
	sev->tio_status = t;

	return;

error_exit:
	pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
	       ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
	kfree(t);
}

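/* Unregister the TSM device registered in sev_tsm_init_locked() */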
void sev_tsm_uninit(struct sev_device *sev)
{
	if (sev->tsmdev)
		tsm_unregister(sev->tsmdev);

	sev->tsmdev = NULL;
}