// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/gpu_scheduler.h>
#include <linux/iommu.h>
#include <linux/pci.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
#include "amdxdna_pm.h"

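/*
 * One firmware image per supported NPU. Each path below appears to encode
 * the PCI device ID and revision that amdxdna_ids[] maps to a dev_info
 * (an assumption based on the matching hex values, e.g. "1502_00" for
 * device 0x1502, revision 0x0).
 */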
MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
23
MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
24
MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
25
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
26
27
/*
 * Interface version history:
 *
 * 0.0: Initial version
 * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
 * 0.2: Support getting the last hardware error
 * 0.3: Support firmware debug buffer
 * 0.4: Support getting resource information
 * 0.5: Support getting telemetry data
 * 0.6: Support preemption
 */
#define AMDXDNA_DRIVER_MAJOR 0
#define AMDXDNA_DRIVER_MINOR 6

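/*
 * Userspace can gate optional features on the minor version reported by the
 * core DRM_IOCTL_VERSION ioctl, which returns the .major/.minor values set
 * in amdxdna_drm_drv below. A minimal sketch, assuming the device node is
 * /dev/accel/accel0:
 *
 *	int fd = open("/dev/accel/accel0", O_RDWR);
 *	struct drm_version ver = {0};
 *
 *	if (fd >= 0 && !ioctl(fd, DRM_IOCTL_VERSION, &ver) &&
 *	    ver.version_minor >= 1) {
 *		// DRM_IOCTL_AMDXDNA_GET_ARRAY is available (added in 0.1)
 *	}
 */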
/*
 * Bind the driver based on the (vendor_id, device_id) pair, then use the
 * (device_id, rev_id) pair as the key to select a device profile. Devices
 * with the same device_id present a very similar interface to the host
 * driver.
 */
static const struct pci_device_id pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },
	{0}
};

MODULE_DEVICE_TABLE(pci, pci_ids);

static const struct amdxdna_device_id amdxdna_ids[] = {
	{ 0x1502, 0x0, &dev_npu1_info },
	{ 0x17f0, 0x10, &dev_npu4_info },
	{ 0x17f0, 0x11, &dev_npu5_info },
	{ 0x17f0, 0x20, &dev_npu6_info },
	{0}
};

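/*
 * Per-open setup: allocate an amdxdna_client, bind the caller's address
 * space to the device with IOMMU SVA to obtain a PASID, and add the client
 * to the device-wide client_list under dev_lock.
 */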
static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	struct amdxdna_client *client;
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->pid = pid_nr(rcu_access_pointer(filp->pid));
	client->xdna = xdna;

	client->sva = iommu_sva_bind_device(xdna->ddev.dev, current->mm);
	if (IS_ERR(client->sva)) {
		ret = PTR_ERR(client->sva);
		XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
		goto failed;
	}
	client->pasid = iommu_sva_get_pasid(client->sva);
	if (client->pasid == IOMMU_PASID_INVALID) {
		XDNA_ERR(xdna, "SVA get pasid failed");
		ret = -ENODEV;
		goto unbind_sva;
	}
	client->mm = current->mm;
	mmgrab(client->mm);
	init_srcu_struct(&client->hwctx_srcu);
	xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
	mutex_init(&client->mm_lock);

	mutex_lock(&xdna->dev_lock);
	list_add_tail(&client->node, &xdna->client_list);
	mutex_unlock(&xdna->dev_lock);

	filp->driver_priv = client;
	client->filp = filp;

	XDNA_DBG(xdna, "pid %d opened", client->pid);
	return 0;

unbind_sva:
	iommu_sva_unbind_device(client->sva);
failed:
	kfree(client);

	return ret;
}

static void amdxdna_client_cleanup(struct amdxdna_client *client)
{
	list_del(&client->node);
	amdxdna_hwctx_remove_all(client);
	xa_destroy(&client->hwctx_xa);
	cleanup_srcu_struct(&client->hwctx_srcu);
	mutex_destroy(&client->mm_lock);

	if (client->dev_heap)
		drm_gem_object_put(to_gobj(client->dev_heap));

	iommu_sva_unbind_device(client->sva);
	mmdrop(client->mm);

	kfree(client);
}

static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	int idx;

	XDNA_DBG(xdna, "closing pid %d", client->pid);

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return;

	mutex_lock(&xdna->dev_lock);
	amdxdna_client_cleanup(client);
	mutex_unlock(&xdna->dev_lock);

	drm_dev_exit(idx);
}

static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_get_info *args = data;
	int ret;

	if (!xdna->dev_info->ops->get_aie_info)
		return -EOPNOTSUPP;

	XDNA_DBG(xdna, "Request parameter %u", args->param);
	mutex_lock(&xdna->dev_lock);
	ret = xdna->dev_info->ops->get_aie_info(client, args);
	mutex_unlock(&xdna->dev_lock);
	return ret;
}

static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_get_array *args = data;

	if (!xdna->dev_info->ops->get_array)
		return -EOPNOTSUPP;

	if (args->pad || !args->num_element || !args->element_size)
		return -EINVAL;

	guard(mutex)(&xdna->dev_lock);
	return xdna->dev_info->ops->get_array(client, args);
}

static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_set_state *args = data;
	int ret;

	if (!xdna->dev_info->ops->set_aie_state)
		return -EOPNOTSUPP;

	XDNA_DBG(xdna, "Request parameter %u", args->param);
	mutex_lock(&xdna->dev_lock);
	ret = xdna->dev_info->ops->set_aie_state(client, args);
	mutex_unlock(&xdna->dev_lock);

	return ret;
}

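/*
 * uAPI dispatch table: all ioctls are available to any client that can open
 * the accel node, except AMDXDNA_SET_STATE which is restricted to root via
 * DRM_ROOT_ONLY. Dispatch goes through drm_ioctl() via amdxdna_fops below.
 */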
static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
	/* Context */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
	/* BO */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
	/* Execution */
	DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
	/* AIE hardware */
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};

static const struct file_operations amdxdna_fops = {
	.owner = THIS_MODULE,
	.open = accel_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = noop_llseek,
	.mmap = drm_gem_mmap,
	.fop_flags = FOP_UNSIGNED_OFFSET,
};

const struct drm_driver amdxdna_drm_drv = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
		DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.fops = &amdxdna_fops,
	.name = "amdxdna_accel_driver",
	.desc = "AMD XDNA DRM implementation",
	.major = AMDXDNA_DRIVER_MAJOR,
	.minor = AMDXDNA_DRIVER_MINOR,
	.open = amdxdna_drm_open,
	.postclose = amdxdna_drm_close,
	.ioctls = amdxdna_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),

	.gem_create_object = amdxdna_gem_create_object_cb,
	.gem_prime_import = amdxdna_gem_prime_import,
};

static const struct amdxdna_dev_info *
amdxdna_get_dev_info(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
		if (pdev->device == amdxdna_ids[i].device &&
		    pdev->revision == amdxdna_ids[i].revision)
			return amdxdna_ids[i].dev_info;
	}
	return NULL;
}

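/*
 * Probe order: allocate the DRM device with devm, select the per-device
 * profile from amdxdna_ids[], create the ordered notifier workqueue, run
 * the device-specific init under dev_lock, then expose sysfs attributes
 * and register the DRM device. Unwind in reverse order on failure.
 */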
static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct amdxdna_dev *xdna;
	int ret;

	xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
	if (IS_ERR(xdna))
		return PTR_ERR(xdna);

	xdna->dev_info = amdxdna_get_dev_info(pdev);
	if (!xdna->dev_info)
		return -ENODEV;

	drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
	init_rwsem(&xdna->notifier_lock);
	INIT_LIST_HEAD(&xdna->client_list);
	pci_set_drvdata(pdev, xdna);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&xdna->notifier_lock);
		fs_reclaim_release(GFP_KERNEL);
	}

	xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", WQ_MEM_RECLAIM);
	if (!xdna->notifier_wq)
		return -ENOMEM;

	mutex_lock(&xdna->dev_lock);
	ret = xdna->dev_info->ops->init(xdna);
	mutex_unlock(&xdna->dev_lock);
	if (ret) {
		XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
		goto destroy_notifier_wq;
	}

	ret = amdxdna_sysfs_init(xdna);
	if (ret) {
		XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
		goto failed_dev_fini;
	}

	ret = drm_dev_register(&xdna->ddev, 0);
	if (ret) {
		XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
		goto failed_sysfs_fini;
	}

	return 0;

failed_sysfs_fini:
	amdxdna_sysfs_fini(xdna);
failed_dev_fini:
	mutex_lock(&xdna->dev_lock);
	xdna->dev_info->ops->fini(xdna);
	mutex_unlock(&xdna->dev_lock);
destroy_notifier_wq:
	destroy_workqueue(xdna->notifier_wq);
	return ret;
}

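/*
 * Remove order: stop the notifier workqueue, unplug the DRM device so new
 * opens and ioctls fail fast, remove the sysfs attributes, then tear down
 * any clients that still hold the node open before running the
 * device-specific fini.
 */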
static void amdxdna_remove(struct pci_dev *pdev)
{
	struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
	struct amdxdna_client *client;

	destroy_workqueue(xdna->notifier_wq);

	drm_dev_unplug(&xdna->ddev);
	amdxdna_sysfs_fini(xdna);

	mutex_lock(&xdna->dev_lock);
	client = list_first_entry_or_null(&xdna->client_list,
					  struct amdxdna_client, node);
	while (client) {
		amdxdna_client_cleanup(client);

		client = list_first_entry_or_null(&xdna->client_list,
						  struct amdxdna_client, node);
	}

	xdna->dev_info->ops->fini(xdna);
	mutex_unlock(&xdna->dev_lock);
}

static const struct dev_pm_ops amdxdna_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume)
	RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL)
};

static struct pci_driver amdxdna_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pci_ids,
	.probe = amdxdna_probe,
	.remove = amdxdna_remove,
	.driver.pm = &amdxdna_pm_ops,
};

module_pci_driver(amdxdna_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("XRT Team <[email protected]>");
MODULE_DESCRIPTION("amdxdna driver");