GitHub Repository: torvalds/linux
Path: blob/master/drivers/fwctl/pds/main.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) Advanced Micro Devices, Inc */
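
/*
 * fwctl driver for AMD PDS devices: exposes the device's firmware RPC
 * endpoints to userspace through the fwctl subsystem, using the PDS
 * adminq as the transport to the firmware.
 */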

#include <linux/module.h>
#include <linux/auxiliary_bus.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/bitfield.h>

#include <uapi/fwctl/fwctl.h>
#include <uapi/fwctl/pds.h>
#include <linux/fwctl.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>

struct pdsfc_uctx {
	struct fwctl_uctx uctx;
	u32 uctx_caps;
};

struct pdsfc_rpc_endpoint_info {
	u32 endpoint;
	dma_addr_t operations_pa;
	struct pds_fwctl_query_data *operations;
	struct mutex lock; /* lock for endpoint info management */
};

struct pdsfc_dev {
	struct fwctl_device fwctl;
	struct pds_auxiliary_dev *padev;
	u32 caps;
	struct pds_fwctl_ident ident;
	dma_addr_t endpoints_pa;
	struct pds_fwctl_query_data *endpoints;
	struct pdsfc_rpc_endpoint_info *endpoint_info;
};
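
/* Stash the device caps in the new user context */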
static int pdsfc_open_uctx(struct fwctl_uctx *uctx)
{
	struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
	struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);

	pdsfc_uctx->uctx_caps = pdsfc->caps;

	return 0;
}
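
/* No per-context state to clean up; the hook is required by fwctl */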
static void pdsfc_close_uctx(struct fwctl_uctx *uctx)
{
}
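
/* Return the info blob for this context: just the capability bits */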
static void *pdsfc_info(struct fwctl_uctx *uctx, size_t *length)
{
	struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);
	struct fwctl_info_pds *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->uctx_caps = pdsfc_uctx->uctx_caps;

	return info;
}
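
/*
 * Ask the firmware for its ident data (e.g. max request/response sizes)
 * via an adminq command, bounced through a coherent DMA buffer.
 */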
static int pdsfc_identify(struct pdsfc_dev *pdsfc)
{
	struct device *dev = &pdsfc->fwctl.dev;
	union pds_core_adminq_comp comp = {0};
	union pds_core_adminq_cmd cmd;
	struct pds_fwctl_ident *ident;
	dma_addr_t ident_pa;
	int err;

	ident = dma_alloc_coherent(dev->parent, sizeof(*ident), &ident_pa, GFP_KERNEL);
	if (!ident) {
		dev_err(dev, "Failed to map ident buffer\n");
		return -ENOMEM;
	}

	cmd = (union pds_core_adminq_cmd) {
		.fwctl_ident = {
			.opcode = PDS_FWCTL_CMD_IDENT,
			.version = 0,
			.len = cpu_to_le32(sizeof(*ident)),
			.ident_pa = cpu_to_le64(ident_pa),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err)
		dev_err(dev, "Failed to send adminq cmd opcode: %u err: %d\n",
			cmd.fwctl_ident.opcode, err);
	else
		pdsfc->ident = *ident;

	dma_free_coherent(dev->parent, sizeof(*ident), ident, ident_pa);

	return err;
}
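
/*
 * Undo pdsfc_init_endpoints(): destroy the per-endpoint locks and
 * release the DMA'd endpoint list.
 */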
static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc)
{
	struct device *dev = &pdsfc->fwctl.dev;
	u32 num_endpoints;
	int i;

	if (!pdsfc->endpoints)
		return;

	num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
	for (i = 0; pdsfc->endpoint_info && i < num_endpoints; i++)
		mutex_destroy(&pdsfc->endpoint_info[i].lock);
	vfree(pdsfc->endpoint_info);
	pdsfc->endpoint_info = NULL;
	dma_free_coherent(dev->parent, PAGE_SIZE,
			  pdsfc->endpoints, pdsfc->endpoints_pa);
	pdsfc->endpoints = NULL;
	pdsfc->endpoints_pa = DMA_MAPPING_ERROR;
}
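
/* Free any per-endpoint operations lists cached by pdsfc_validate_rpc() */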
static void pdsfc_free_operations(struct pdsfc_dev *pdsfc)
{
	struct device *dev = &pdsfc->fwctl.dev;
	u32 num_endpoints;
	int i;

	num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
	for (i = 0; i < num_endpoints; i++) {
		struct pdsfc_rpc_endpoint_info *ei = &pdsfc->endpoint_info[i];

		if (ei->operations) {
			dma_free_coherent(dev->parent, PAGE_SIZE,
					  ei->operations, ei->operations_pa);
			ei->operations = NULL;
			ei->operations_pa = DMA_MAPPING_ERROR;
		}
	}
}
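
/*
 * Query the firmware for its list of RPC endpoints; on success the
 * caller owns the returned DMA buffer and its bus address in *pa.
 */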
static struct pds_fwctl_query_data *pdsfc_get_endpoints(struct pdsfc_dev *pdsfc,
							dma_addr_t *pa)
{
	struct device *dev = &pdsfc->fwctl.dev;
	union pds_core_adminq_comp comp = {0};
	struct pds_fwctl_query_data *data;
	union pds_core_adminq_cmd cmd;
	dma_addr_t data_pa;
	int err;

	data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
	if (!data) {
		dev_err(dev, "Failed to map endpoint list\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = (union pds_core_adminq_cmd) {
		.fwctl_query = {
			.opcode = PDS_FWCTL_CMD_QUERY,
			.entity = PDS_FWCTL_RPC_ROOT,
			.version = 0,
			.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
			.query_data_buf_pa = cpu_to_le64(data_pa),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err) {
		dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
			cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
		dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
		return ERR_PTR(err);
	}

	*pa = data_pa;

	return data;
}
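
/* Fetch the endpoint list and set up the per-endpoint lock/cache state */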
static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc)
{
	struct pds_fwctl_query_data_endpoint *ep_entry;
	u32 num_endpoints;
	int i;

	pdsfc->endpoints = pdsfc_get_endpoints(pdsfc, &pdsfc->endpoints_pa);
	if (IS_ERR(pdsfc->endpoints))
		return PTR_ERR(pdsfc->endpoints);

	num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
	pdsfc->endpoint_info = vcalloc(num_endpoints,
				       sizeof(*pdsfc->endpoint_info));
	if (!pdsfc->endpoint_info) {
		pdsfc_free_endpoints(pdsfc);
		return -ENOMEM;
	}

	ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries;
	for (i = 0; i < num_endpoints; i++) {
		mutex_init(&pdsfc->endpoint_info[i].lock);
		pdsfc->endpoint_info[i].endpoint = le32_to_cpu(ep_entry[i].id);
	}

	return 0;
}
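
/*
 * Query the firmware for the operations supported by one endpoint and
 * map each firmware command attribute onto an fwctl_rpc_scope.
 */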
static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc,
							 dma_addr_t *pa, u32 ep)
{
	struct pds_fwctl_query_data_operation *entries;
	struct device *dev = &pdsfc->fwctl.dev;
	union pds_core_adminq_comp comp = {0};
	struct pds_fwctl_query_data *data;
	union pds_core_adminq_cmd cmd;
	dma_addr_t data_pa;
	u32 num_entries;
	int err;
	int i;

	/* Query the operations list for the given endpoint */
	data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
	if (!data) {
		dev_err(dev, "Failed to map operations list\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = (union pds_core_adminq_cmd) {
		.fwctl_query = {
			.opcode = PDS_FWCTL_CMD_QUERY,
			.entity = PDS_FWCTL_RPC_ENDPOINT,
			.version = 0,
			.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
			.query_data_buf_pa = cpu_to_le64(data_pa),
			.ep = cpu_to_le32(ep),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err) {
		dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
			cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
		dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
		return ERR_PTR(err);
	}

	*pa = data_pa;

	entries = (struct pds_fwctl_query_data_operation *)data->entries;
	num_entries = le32_to_cpu(data->num_entries);
	dev_dbg(dev, "num_entries %u\n", num_entries);
	for (i = 0; i < num_entries; i++) {
		/* Translate FW command attribute to fwctl scope */
		switch (entries[i].scope) {
		case PDSFC_FW_CMD_ATTR_READ:
		case PDSFC_FW_CMD_ATTR_WRITE:
		case PDSFC_FW_CMD_ATTR_SYNC:
			entries[i].scope = FWCTL_RPC_CONFIGURATION;
			break;
		case PDSFC_FW_CMD_ATTR_DEBUG_READ:
			entries[i].scope = FWCTL_RPC_DEBUG_READ_ONLY;
			break;
		case PDSFC_FW_CMD_ATTR_DEBUG_WRITE:
			entries[i].scope = FWCTL_RPC_DEBUG_WRITE;
			break;
		default:
			entries[i].scope = FWCTL_RPC_DEBUG_WRITE_FULL;
			break;
		}
		dev_dbg(dev, "endpoint %u operation: id %x scope %d\n",
			ep, le32_to_cpu(entries[i].id), entries[i].scope);
	}

	return data;
}
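
/*
 * Sanity-check an RPC request: payload sizes against the ident limits,
 * the endpoint against the endpoint list, and the operation against the
 * endpoint's operations and the caller's fwctl scope.
 */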
static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
			      struct fwctl_rpc_pds *rpc,
			      enum fwctl_rpc_scope scope)
{
	struct pds_fwctl_query_data_operation *op_entry;
	struct pdsfc_rpc_endpoint_info *ep_info = NULL;
	struct device *dev = &pdsfc->fwctl.dev;
	u32 num_entries;
	int i;

	/* validate rpc in_len & out_len based
	 * on ident.max_req_sz & max_resp_sz
	 */
	if (rpc->in.len > le32_to_cpu(pdsfc->ident.max_req_sz)) {
		dev_dbg(dev, "Invalid request size %u, max %u\n",
			rpc->in.len, le32_to_cpu(pdsfc->ident.max_req_sz));
		return -EINVAL;
	}

	if (rpc->out.len > le32_to_cpu(pdsfc->ident.max_resp_sz)) {
		dev_dbg(dev, "Invalid response size %u, max %u\n",
			rpc->out.len, le32_to_cpu(pdsfc->ident.max_resp_sz));
		return -EINVAL;
	}

	num_entries = le32_to_cpu(pdsfc->endpoints->num_entries);
	for (i = 0; i < num_entries; i++) {
		if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) {
			ep_info = &pdsfc->endpoint_info[i];
			break;
		}
	}
	if (!ep_info) {
		dev_dbg(dev, "Invalid endpoint %u\n", rpc->in.ep);
		return -EINVAL;
	}

	/* query and cache this endpoint's operations */
	mutex_lock(&ep_info->lock);
	if (!ep_info->operations) {
		struct pds_fwctl_query_data *operations;

		operations = pdsfc_get_operations(pdsfc,
						  &ep_info->operations_pa,
						  rpc->in.ep);
		if (IS_ERR(operations)) {
			mutex_unlock(&ep_info->lock);
			return PTR_ERR(operations);
		}
		ep_info->operations = operations;
	}
	mutex_unlock(&ep_info->lock);

	/* reject unsupported and/or out of scope commands */
	op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries;
	num_entries = le32_to_cpu(ep_info->operations->num_entries);
	for (i = 0; i < num_entries; i++) {
		if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, le32_to_cpu(op_entry[i].id))) {
			if (scope < op_entry[i].scope)
				return -EPERM;
			return 0;
		}
	}

	dev_dbg(dev, "Invalid operation %u for endpoint %u\n", rpc->in.op, rpc->in.ep);

	return -EINVAL;
}
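
/*
 * Carry a validated RPC to the firmware: bounce the request payload in
 * through a DMA mapping, issue the adminq command, and copy any response
 * payload back out to userspace.
 */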
static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			  void *in, size_t in_len, size_t *out_len)
{
	struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
	struct device *dev = &uctx->fwctl->dev;
	union pds_core_adminq_comp comp = {0};
	dma_addr_t out_payload_dma_addr = 0;
	dma_addr_t in_payload_dma_addr = 0;
	struct fwctl_rpc_pds *rpc = in;
	union pds_core_adminq_cmd cmd;
	void *out_payload = NULL;
	void *in_payload = NULL;
	void *out = NULL;
	int err;

	err = pdsfc_validate_rpc(pdsfc, rpc, scope);
	if (err)
		return ERR_PTR(err);

	if (rpc->in.len > 0) {
		in_payload = kzalloc(rpc->in.len, GFP_KERNEL);
		if (!in_payload) {
			dev_err(dev, "Failed to allocate in_payload\n");
			err = -ENOMEM;
			goto err_out;
		}

		if (copy_from_user(in_payload, u64_to_user_ptr(rpc->in.payload),
				   rpc->in.len)) {
			dev_dbg(dev, "Failed to copy in_payload from user\n");
			err = -EFAULT;
			goto err_in_payload;
		}

		in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
						     rpc->in.len, DMA_TO_DEVICE);
		err = dma_mapping_error(dev->parent, in_payload_dma_addr);
		if (err) {
			dev_dbg(dev, "Failed to map in_payload\n");
			goto err_in_payload;
		}
	}

	if (rpc->out.len > 0) {
		out_payload = kzalloc(rpc->out.len, GFP_KERNEL);
		if (!out_payload) {
			dev_dbg(dev, "Failed to allocate out_payload\n");
			err = -ENOMEM;
			goto err_out_payload;
		}

		out_payload_dma_addr = dma_map_single(dev->parent, out_payload,
						      rpc->out.len, DMA_FROM_DEVICE);
		err = dma_mapping_error(dev->parent, out_payload_dma_addr);
		if (err) {
			dev_dbg(dev, "Failed to map out_payload\n");
			goto err_out_payload;
		}
	}

	cmd = (union pds_core_adminq_cmd) {
		.fwctl_rpc = {
			.opcode = PDS_FWCTL_CMD_RPC,
			.flags = cpu_to_le16(PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP),
			.ep = cpu_to_le32(rpc->in.ep),
			.op = cpu_to_le32(rpc->in.op),
			.req_pa = cpu_to_le64(in_payload_dma_addr),
			.req_sz = cpu_to_le32(rpc->in.len),
			.resp_pa = cpu_to_le64(out_payload_dma_addr),
			.resp_sz = cpu_to_le32(rpc->out.len),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err) {
		dev_dbg(dev, "%s: ep %u op %x req_pa %llx req_sz %u req_sg %u resp_pa %llx resp_sz %u resp_sg %u err %d\n",
			__func__, rpc->in.ep, rpc->in.op,
			cmd.fwctl_rpc.req_pa, cmd.fwctl_rpc.req_sz, cmd.fwctl_rpc.req_sg_elems,
			cmd.fwctl_rpc.resp_pa, cmd.fwctl_rpc.resp_sz, cmd.fwctl_rpc.resp_sg_elems,
			err);
		goto done;
	}

	dynamic_hex_dump("out ", DUMP_PREFIX_OFFSET, 16, 1, out_payload, rpc->out.len, true);

	if (copy_to_user(u64_to_user_ptr(rpc->out.payload), out_payload, rpc->out.len)) {
		dev_dbg(dev, "Failed to copy out_payload to user\n");
		out = ERR_PTR(-EFAULT);
		goto done;
	}

	rpc->out.retval = le32_to_cpu(comp.fwctl_rpc.err);
	*out_len = in_len;
	out = in;

done:
	if (out_payload_dma_addr)
		dma_unmap_single(dev->parent, out_payload_dma_addr,
				 rpc->out.len, DMA_FROM_DEVICE);
err_out_payload:
	kfree(out_payload);

	if (in_payload_dma_addr)
		dma_unmap_single(dev->parent, in_payload_dma_addr,
				 rpc->in.len, DMA_TO_DEVICE);
err_in_payload:
	kfree(in_payload);
err_out:
	if (err)
		return ERR_PTR(err);

	return out;
}
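
/* fwctl entry points for this device type */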
static const struct fwctl_ops pdsfc_ops = {
	.device_type = FWCTL_DEVICE_TYPE_PDS,
	.uctx_size = sizeof(struct pdsfc_uctx),
	.open_uctx = pdsfc_open_uctx,
	.close_uctx = pdsfc_close_uctx,
	.info = pdsfc_info,
	.fw_rpc = pdsfc_fw_rpc,
};
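
/*
 * Set up the fwctl device on top of the PDS auxiliary device, identify
 * the firmware, discover the endpoints, and register with fwctl.
 */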
static int pdsfc_probe(struct auxiliary_device *adev,
		       const struct auxiliary_device_id *id)
{
	struct pds_auxiliary_dev *padev =
		container_of(adev, struct pds_auxiliary_dev, aux_dev);
	struct device *dev = &adev->dev;
	struct pdsfc_dev *pdsfc;
	int err;

	pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
				   struct pdsfc_dev, fwctl);
	if (!pdsfc)
		return dev_err_probe(dev, -ENOMEM, "Failed to allocate fwctl device struct\n");
	pdsfc->padev = padev;

	err = pdsfc_identify(pdsfc);
	if (err) {
		fwctl_put(&pdsfc->fwctl);
		return dev_err_probe(dev, err, "Failed to identify device\n");
	}

	err = pdsfc_init_endpoints(pdsfc);
	if (err) {
		fwctl_put(&pdsfc->fwctl);
		return dev_err_probe(dev, err, "Failed to init endpoints\n");
	}

	pdsfc->caps = PDS_FWCTL_QUERY_CAP | PDS_FWCTL_SEND_CAP;

	err = fwctl_register(&pdsfc->fwctl);
	if (err) {
		pdsfc_free_endpoints(pdsfc);
		fwctl_put(&pdsfc->fwctl);
		return dev_err_probe(dev, err, "Failed to register device\n");
	}

	auxiliary_set_drvdata(adev, pdsfc);

	return 0;
}
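
/* Tear down in reverse: unregister, then free the cached firmware data */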
static void pdsfc_remove(struct auxiliary_device *adev)
{
	struct pdsfc_dev *pdsfc = auxiliary_get_drvdata(adev);

	fwctl_unregister(&pdsfc->fwctl);
	pdsfc_free_operations(pdsfc);
	pdsfc_free_endpoints(pdsfc);

	fwctl_put(&pdsfc->fwctl);
}
static const struct auxiliary_device_id pdsfc_id_table[] = {
	{.name = PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_FWCTL_STR },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, pdsfc_id_table);

static struct auxiliary_driver pdsfc_driver = {
	.name = "pds_fwctl",
	.probe = pdsfc_probe,
	.remove = pdsfc_remove,
	.id_table = pdsfc_id_table,
};

module_auxiliary_driver(pdsfc_driver);

MODULE_IMPORT_NS("FWCTL");
MODULE_DESCRIPTION("pds fwctl driver");
MODULE_AUTHOR("Shannon Nelson <[email protected]>");
MODULE_AUTHOR("Brett Creeley <[email protected]>");
MODULE_LICENSE("GPL");