GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/amdxdna/aie2_pci.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/xarray.h>

#include "aie2_msg_priv.h"
#include "aie2_pci.h"
#include "aie2_solver.h"
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"

static int aie2_max_col = XRS_MAX_COL;
module_param(aie2_max_col, uint, 0600);
MODULE_PARM_DESC(aie2_max_col, "Maximum number of columns that can be used");

/*
 * The management mailbox channel is allocated by firmware.
 * The related register and ring buffer information is in the SRAM BAR.
 * This struct is the register layout.
 */
#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
struct mgmt_mbox_chann_info {
	__u32	x2i_tail;
	__u32	x2i_head;
	__u32	x2i_buf;
	__u32	x2i_buf_sz;
	__u32	i2x_tail;
	__u32	i2x_head;
	__u32	i2x_buf;
	__u32	i2x_buf_sz;
	__u32	magic;
	__u32	msi_id;
	__u32	prot_major;
	__u32	prot_minor;
	__u32	rsvd[4];
};

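/*
 * Verify that the mailbox protocol the driver supports (declared in
 * ndev->priv) is compatible with the version the firmware reports:
 * major versions must match exactly, and the firmware minor version
 * must be at least the driver's.
 */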
static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
{
	struct amdxdna_dev *xdna = ndev->xdna;

	/*
	 * The mailbox behavior supported by the driver is defined by
	 * ndev->priv->protocol_major and protocol_minor.
	 *
	 * When protocol_major and fw_major are different, the driver
	 * and firmware are incompatible.
	 */
	if (ndev->priv->protocol_major != fw_major) {
		XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
			 fw_major, fw_minor);
		return -EINVAL;
	}

	/*
	 * When protocol_minor is greater than fw_minor, the driver relies
	 * on operations the installed firmware does not support.
	 */
	if (ndev->priv->protocol_minor > fw_minor) {
		XDNA_ERR(xdna, "Firmware minor version smaller than supported");
		return -EINVAL;
	}
	return 0;
}

static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
{
	struct amdxdna_dev *xdna = ndev->xdna;

	XDNA_DBG(xdna, "i2x tail 0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "i2x head 0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
	XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
	XDNA_DBG(xdna, "i2x rsize 0x%x", ndev->mgmt_i2x.rb_size);
	XDNA_DBG(xdna, "x2i tail 0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "x2i head 0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
	XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
	XDNA_DBG(xdna, "x2i rsize 0x%x", ndev->mgmt_x2i.rb_size);
	XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
	XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
	XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
}

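/*
 * Read the management mailbox channel description that firmware
 * publishes in the SRAM BAR: poll FW_ALIVE_OFF for the location of a
 * mgmt_mbox_chann_info block, validate its magic, and translate the
 * reported registers and ring buffers into the i2x/x2i channel
 * resources used to create the management channel.
 */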
static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
{
	struct mgmt_mbox_chann_info info_regs;
	struct xdna_mailbox_chann_res *i2x;
	struct xdna_mailbox_chann_res *x2i;
	u32 addr, off;
	u32 *reg;
	int ret;
	int i;

	/*
	 * Once firmware is alive, it writes the management channel
	 * information into the SRAM BAR and writes the address of that
	 * information at the FW_ALIVE_OFF offset in the SRAM BAR.
	 *
	 * Reading a non-zero value from FW_ALIVE_OFF implies that the
	 * firmware is alive.
	 */
	ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
				 addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
	if (ret || !addr)
		return -ETIME;

	off = AIE2_SRAM_OFF(ndev, addr);
	reg = (u32 *)&info_regs;
	for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
		reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));

	if (info_regs.magic != MGMT_MBOX_MAGIC) {
		XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
		ret = -EINVAL;
		goto done;
	}

	i2x = &ndev->mgmt_i2x;
	x2i = &ndev->mgmt_x2i;

	i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
	i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
	i2x->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
	i2x->rb_size = info_regs.i2x_buf_sz;

	x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
	x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
	x2i->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
	x2i->rb_size = info_regs.x2i_buf_sz;

	ndev->mgmt_chan_idx = info_regs.msi_id;
	ndev->mgmt_prot_major = info_regs.prot_major;
	ndev->mgmt_prot_minor = info_regs.prot_minor;

	ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);

done:
	aie2_dump_chann_info_debug(ndev);

	/* Must clear address at FW_ALIVE_OFF */
	writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));

	return ret;
}

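/*
 * Apply every runtime configuration entry of the given category from
 * the device's rt_config table. When @val is non-NULL it overrides
 * the table's default value for each matching entry.
 */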
int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
		     enum rt_config_category category, u32 *val)
{
	const struct rt_config *cfg;
	u32 value;
	int ret;

	for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
		if (cfg->category != category)
			continue;

		value = val ? *val : cfg->value;
		ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
		if (ret) {
			XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
				 cfg->type, value);
			return ret;
		}
	}

	return 0;
}

static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_suspend_fw(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Suspend firmware failed");
		return ret;
	}

	ret = aie2_resume_fw(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Resume firmware failed");
		return ret;
	}

	return 0;
}

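/*
 * Bring the management firmware to an operational state: apply the
 * initial runtime configuration, assign the management PASID, reset
 * the firmware via a suspend/resume cycle, and re-send the async
 * error events if they have already been allocated.
 */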
static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Runtime config failed");
		return ret;
	}

	ret = aie2_assign_mgmt_pasid(ndev, 0);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Cannot assign PASID");
		return ret;
	}

	ret = aie2_xdna_reset(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Reset firmware failed");
		return ret;
	}

	if (!ndev->async_events)
		return 0;

	ret = aie2_error_async_events_send(ndev);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Send async events failed");
		return ret;
	}

	return 0;
}

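/*
 * Query the firmware version, AIE hardware version and AIE metadata
 * over the management channel and cache the results in the device
 * handle.
 */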
static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
{
	int ret;

	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
	if (ret) {
		XDNA_ERR(ndev->xdna, "query firmware version failed");
		return ret;
	}

	ret = aie2_query_aie_version(ndev, &ndev->version);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Query AIE version failed");
		return ret;
	}

	ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
	if (ret) {
		XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
		return ret;
	}

	return 0;
}

static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
{
	if (aie2_suspend_fw(ndev))
		XDNA_ERR(ndev->xdna, "Suspend_fw failed");
	XDNA_DBG(ndev->xdna, "Firmware suspended");
}

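/*
 * Callbacks invoked by the AIE resource solver (XRS) when it assigns
 * or releases a column partition for a hardware context, or when the
 * default DPM level changes.
 */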
static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
{
	struct amdxdna_hwctx *hwctx = cb_arg;
	struct amdxdna_dev *xdna;
	int ret;

	xdna = hwctx->client->xdna;

	hwctx->start_col = action->part.start_col;
	hwctx->num_col = action->part.ncols;
	ret = aie2_create_context(xdna->dev_handle, hwctx);
	if (ret)
		XDNA_ERR(xdna, "create context failed, ret %d", ret);

	return ret;
}

static int aie2_xrs_unload(void *cb_arg)
{
	struct amdxdna_hwctx *hwctx = cb_arg;
	struct amdxdna_dev *xdna;
	int ret;

	xdna = hwctx->client->xdna;

	ret = aie2_destroy_context(xdna->dev_handle, hwctx);
	if (ret)
		XDNA_ERR(xdna, "destroy context failed, ret %d", ret);

	return ret;
}

static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
{
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	struct amdxdna_dev_hdl *ndev;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	ndev = xdna->dev_handle;
	ndev->dft_dpm_level = dpm_level;
	if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
		return 0;

	return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
}

static struct xrs_action_ops aie2_xrs_actions = {
	.load = aie2_xrs_load,
	.unload = aie2_xrs_unload,
	.set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
};

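/*
 * Stop the hardware in the reverse order of aie2_hw_start(): suspend
 * the firmware, tear down the management mailbox channel and mailbox
 * device, stop the PSP, shut down the SMU and disable the PCI device.
 * Safe to call when the device is already stopped.
 */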
static void aie2_hw_stop(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	if (ndev->dev_status <= AIE2_DEV_INIT) {
		XDNA_ERR(xdna, "device is already stopped");
		return;
	}

	aie2_mgmt_fw_fini(ndev);
	xdna_mailbox_stop_channel(ndev->mgmt_chann);
	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
	ndev->mgmt_chann = NULL;
	drmm_kfree(&xdna->ddev, ndev->mbox);
	ndev->mbox = NULL;
	aie2_psp_stop(ndev->psp_hdl);
	aie2_smu_fini(ndev);
	pci_disable_device(pdev);

	ndev->dev_status = AIE2_DEV_INIT;
}

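/*
 * Bring up the hardware: enable the PCI device, initialize the SMU,
 * start the PSP, wait for firmware to publish the management channel
 * info, create the mailbox device and its management channel, then
 * run the power-management and management-firmware init sequences.
 * Partial failures are unwound in reverse order.
 */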
static int aie2_hw_start(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
	struct xdna_mailbox_res mbox_res;
	u32 xdna_mailbox_intr_reg;
	int mgmt_mb_irq, ret;

	if (ndev->dev_status >= AIE2_DEV_START) {
		XDNA_INFO(xdna, "device is already started");
		return 0;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
		return ret;
	}
	pci_set_master(pdev);

	ret = aie2_smu_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
		goto disable_dev;
	}

	ret = aie2_psp_start(ndev->psp_hdl);
	if (ret) {
		XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
		goto fini_smu;
	}

	ret = aie2_get_mgmt_chann_info(ndev);
	if (ret) {
		XDNA_ERR(xdna, "firmware is not alive");
		goto stop_psp;
	}

	mbox_res.ringbuf_base = ndev->sram_base;
	mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
	mbox_res.mbox_base = ndev->mbox_base;
	mbox_res.mbox_size = MBOX_SIZE(ndev);
	mbox_res.name = "xdna_mailbox";
	ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
	if (!ndev->mbox) {
		XDNA_ERR(xdna, "failed to create mailbox device");
		ret = -ENODEV;
		goto stop_psp;
	}

	mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
	if (mgmt_mb_irq < 0) {
		ret = mgmt_mb_irq;
		XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
		goto stop_psp;
	}

	xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
	ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
						       &ndev->mgmt_x2i,
						       &ndev->mgmt_i2x,
						       xdna_mailbox_intr_reg,
						       mgmt_mb_irq);
	if (!ndev->mgmt_chann) {
		XDNA_ERR(xdna, "failed to create management mailbox channel");
		ret = -EINVAL;
		goto stop_psp;
	}

	ret = aie2_pm_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
		goto destroy_mgmt_chann;
	}

	ret = aie2_mgmt_fw_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "init mgmt firmware failed, ret %d", ret);
		goto destroy_mgmt_chann;
	}

	ndev->dev_status = AIE2_DEV_START;

	return 0;

destroy_mgmt_chann:
	xdna_mailbox_stop_channel(ndev->mgmt_chann);
	xdna_mailbox_destroy_channel(ndev->mgmt_chann);
stop_psp:
	aie2_psp_stop(ndev->psp_hdl);
fini_smu:
	aie2_smu_fini(ndev);
disable_dev:
	pci_disable_device(pdev);

	return ret;
}

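/*
 * One-time device initialization: load the firmware image, map the
 * PSP/SRAM/SMU/mailbox BARs, set the DMA mask, allocate MSI-X
 * vectors, create the PSP, start the hardware, then initialize the
 * resource solver and the async error event machinery.
 */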
static int aie2_init(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
	struct init_config xrs_cfg = { 0 };
	struct amdxdna_dev_hdl *ndev;
	struct psp_config psp_conf;
	const struct firmware *fw;
	unsigned long bars = 0;
	int i, nvec, ret;

	ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		return -ENOMEM;

	ndev->priv = xdna->dev_info->dev_priv;
	ndev->xdna = xdna;

	ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
	if (ret) {
		XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
			 ndev->priv->fw_path, ret);
		return ret;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
		goto release_fw;
	}

	for (i = 0; i < PSP_MAX_REGS; i++)
		set_bit(PSP_REG_BAR(ndev, i), &bars);

	set_bit(xdna->dev_info->sram_bar, &bars);
	set_bit(xdna->dev_info->smu_bar, &bars);
	set_bit(xdna->dev_info->mbox_bar, &bars);

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!test_bit(i, &bars))
			continue;
		tbl[i] = pcim_iomap(pdev, i, 0);
		if (!tbl[i]) {
			XDNA_ERR(xdna, "map bar %d failed", i);
			ret = -ENOMEM;
			goto release_fw;
		}
	}

	ndev->sram_base = tbl[xdna->dev_info->sram_bar];
	ndev->smu_base = tbl[xdna->dev_info->smu_bar];
	ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
		goto release_fw;
	}

	nvec = pci_msix_vec_count(pdev);
	if (nvec <= 0) {
		XDNA_ERR(xdna, "failed to get number of interrupt vectors");
		ret = -EINVAL;
		goto release_fw;
	}

	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0) {
		XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
		goto release_fw;
	}

	psp_conf.fw_size = fw->size;
	psp_conf.fw_buf = fw->data;
	for (i = 0; i < PSP_MAX_REGS; i++)
		psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
	ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
	if (!ndev->psp_hdl) {
		XDNA_ERR(xdna, "failed to create psp");
		ret = -ENOMEM;
		goto free_irq;
	}
	xdna->dev_handle = ndev;

	ret = aie2_hw_start(xdna);
	if (ret) {
		XDNA_ERR(xdna, "start npu failed, ret %d", ret);
		goto free_irq;
	}

	ret = aie2_mgmt_fw_query(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
		goto stop_hw;
	}
	ndev->total_col = min(aie2_max_col, ndev->metadata.cols);

	xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
	for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
		xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
	xrs_cfg.sys_eff_factor = 1;
	xrs_cfg.ddev = &xdna->ddev;
	xrs_cfg.actions = &aie2_xrs_actions;
	xrs_cfg.total_col = ndev->total_col;

	xdna->xrs_hdl = xrsm_init(&xrs_cfg);
	if (!xdna->xrs_hdl) {
		XDNA_ERR(xdna, "Initialize resolver failed");
		ret = -EINVAL;
		goto stop_hw;
	}

	ret = aie2_error_async_events_alloc(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
		goto stop_hw;
	}

	ret = aie2_error_async_events_send(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
		goto async_event_free;
	}

	/* Issue a command to make sure firmware has handled the async events */
	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
	if (ret) {
		XDNA_ERR(xdna, "Re-query firmware version failed");
		goto async_event_free;
	}

	release_firmware(fw);
	return 0;

async_event_free:
	aie2_error_async_events_free(ndev);
stop_hw:
	aie2_hw_stop(xdna);
free_irq:
	pci_free_irq_vectors(pdev);
release_fw:
	release_firmware(fw);

	return ret;
}

static void aie2_fini(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	aie2_hw_stop(xdna);
	aie2_error_async_events_free(ndev);
	pci_free_irq_vectors(pdev);
}

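/*
 * Per-parameter handlers for the GET_INFO ioctl. Each one copies a
 * single query result back to the user buffer in args->buffer.
 */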
static int aie2_get_aie_status(struct amdxdna_client *client,
			       struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_status status;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret;

	ndev = xdna->dev_handle;
	if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
		return -EFAULT;
	}

	if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
		XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
			 status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
		return -EINVAL;
	}

	ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
				status.buffer_size, &status.cols_filled);
	if (ret) {
		XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
		return ret;
	}

	if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
		return -EFAULT;
	}

	return 0;
}

static int aie2_get_aie_metadata(struct amdxdna_client *client,
				 struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_metadata *meta;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret = 0;

	ndev = xdna->dev_handle;
	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		return -ENOMEM;

	meta->col_size = ndev->metadata.size;
	meta->cols = ndev->metadata.cols;
	meta->rows = ndev->metadata.rows;

	meta->version.major = ndev->metadata.version.major;
	meta->version.minor = ndev->metadata.version.minor;

	meta->core.row_count = ndev->metadata.core.row_count;
	meta->core.row_start = ndev->metadata.core.row_start;
	meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
	meta->core.lock_count = ndev->metadata.core.lock_count;
	meta->core.event_reg_count = ndev->metadata.core.event_reg_count;

	meta->mem.row_count = ndev->metadata.mem.row_count;
	meta->mem.row_start = ndev->metadata.mem.row_start;
	meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
	meta->mem.lock_count = ndev->metadata.mem.lock_count;
	meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;

	meta->shim.row_count = ndev->metadata.shim.row_count;
	meta->shim.row_start = ndev->metadata.shim.row_start;
	meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
	meta->shim.lock_count = ndev->metadata.shim.lock_count;
	meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;

	if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
		ret = -EFAULT;

	kfree(meta);
	return ret;
}

static int aie2_get_aie_version(struct amdxdna_client *client,
				struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_version version;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;

	ndev = xdna->dev_handle;
	version.major = ndev->version.major;
	version.minor = ndev->version.minor;

	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
		return -EFAULT;

	return 0;
}

static int aie2_get_firmware_version(struct amdxdna_client *client,
				     struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_firmware_version version;
	struct amdxdna_dev *xdna = client->xdna;

	version.major = xdna->fw_ver.major;
	version.minor = xdna->fw_ver.minor;
	version.patch = xdna->fw_ver.sub;
	version.build = xdna->fw_ver.build;

	if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
		return -EFAULT;

	return 0;
}

static int aie2_get_power_mode(struct amdxdna_client *client,
			       struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_get_power_mode mode = {};
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;

	ndev = xdna->dev_handle;
	mode.power_mode = ndev->pw_mode;

	if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
		return -EFAULT;

	return 0;
}

static int aie2_get_clock_metadata(struct amdxdna_client *client,
				   struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_clock_metadata *clock;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret = 0;

	ndev = xdna->dev_handle;
	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return -ENOMEM;

	snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
		 "MP-NPU Clock");
	clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
	snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
	clock->h_clock.freq_mhz = ndev->hclk_freq;

	if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
		ret = -EFAULT;

	kfree(clock);
	return ret;
}

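/*
 * Walk every client's hardware contexts (under SRCU) and copy one
 * amdxdna_drm_query_hwctx record per context to the user buffer. If
 * the buffer is too small, keep counting so the required size can be
 * reported back in args->buffer_size.
 */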
static int aie2_get_hwctx_status(struct amdxdna_client *client,
				 struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_hwctx __user *buf;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_drm_query_hwctx *tmp;
	struct amdxdna_client *tmp_client;
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;
	bool overflow = false;
	u32 req_bytes = 0;
	u32 hw_i = 0;
	int ret = 0;
	int idx;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	buf = u64_to_user_ptr(args->buffer);
	list_for_each_entry(tmp_client, &xdna->client_list, node) {
		idx = srcu_read_lock(&tmp_client->hwctx_srcu);
		amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
			req_bytes += sizeof(*tmp);
			if (args->buffer_size < req_bytes) {
				/* Continue iterating to get the required size */
				overflow = true;
				continue;
			}

			memset(tmp, 0, sizeof(*tmp));
			tmp->pid = tmp_client->pid;
			tmp->context_id = hwctx->id;
			tmp->start_col = hwctx->start_col;
			tmp->num_col = hwctx->num_col;
			tmp->command_submissions = hwctx->priv->seq;
			tmp->command_completions = hwctx->priv->completed;

			if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
				ret = -EFAULT;
				srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
				goto out;
			}
			hw_i++;
		}
		srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
	}

	if (overflow) {
		XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
			 args->buffer_size, req_bytes);
		ret = -EINVAL;
	}

out:
	kfree(tmp);
	args->buffer_size = req_bytes;
	return ret;
}

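/*
 * Top-level dispatchers for the GET_INFO and SET_STATE requests. Both
 * take a drm_dev_enter() reference so the device cannot go away while
 * a request is being handled.
 */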
static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	int ret, idx;

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_AMDXDNA_QUERY_AIE_STATUS:
		ret = aie2_get_aie_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_METADATA:
		ret = aie2_get_aie_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_VERSION:
		ret = aie2_get_aie_version(client, args);
		break;
	case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
		ret = aie2_get_clock_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
		ret = aie2_get_hwctx_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
		ret = aie2_get_firmware_version(client, args);
		break;
	case DRM_AMDXDNA_GET_POWER_MODE:
		ret = aie2_get_power_mode(client, args);
		break;
	default:
		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
		ret = -EOPNOTSUPP;
	}
	XDNA_DBG(xdna, "Got param %d", args->param);

	drm_dev_exit(idx);
	return ret;
}

static int aie2_set_power_mode(struct amdxdna_client *client,
			       struct amdxdna_drm_set_state *args)
{
	struct amdxdna_drm_set_power_mode power_state;
	enum amdxdna_power_mode_type power_mode;
	struct amdxdna_dev *xdna = client->xdna;

	if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
			   sizeof(power_state))) {
		XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
		return -EFAULT;
	}

	if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
		return -EINVAL;

	power_mode = power_state.power_mode;
	if (power_mode > POWER_MODE_TURBO) {
		XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
		return -EINVAL;
	}

	return aie2_pm_set_mode(xdna->dev_handle, power_mode);
}

static int aie2_set_state(struct amdxdna_client *client,
			  struct amdxdna_drm_set_state *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	int ret, idx;

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_AMDXDNA_SET_POWER_MODE:
		ret = aie2_set_power_mode(client, args);
		break;
	default:
		XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
		ret = -EOPNOTSUPP;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

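/*
 * Device operations for AIE2-based NPUs, plugged into the common
 * amdxdna PCI driver.
 */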
const struct amdxdna_dev_ops aie2_ops = {
	.init = aie2_init,
	.fini = aie2_fini,
	.resume = aie2_hw_start,
	.suspend = aie2_hw_stop,
	.get_aie_info = aie2_get_info,
	.set_aie_state = aie2_set_state,
	.hwctx_init = aie2_hwctx_init,
	.hwctx_fini = aie2_hwctx_fini,
	.hwctx_config = aie2_hwctx_config,
	.cmd_submit = aie2_cmd_submit,
	.hmm_invalidate = aie2_hmm_invalidate,
	.hwctx_suspend = aie2_hwctx_suspend,
	.hwctx_resume = aie2_hwctx_resume,
};