GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/qaic/qaic_drv.c
// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <uapi/drm/qaic_accel.h>

#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_debugfs.h"
#include "qaic_ras.h"
#include "qaic_ssr.h"
#include "qaic_timesync.h"
#include "sahara.h"

MODULE_IMPORT_NS("DMA_BUF");

#define PCI_DEVICE_ID_QCOM_AIC080 0xa080
#define PCI_DEVICE_ID_QCOM_AIC100 0xa100
#define PCI_DEVICE_ID_QCOM_AIC200 0xa110
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR 5
#define CNTL_MINOR 0

struct qaic_device_config {
	/* Indicates the AIC family the device belongs to */
	int family;
	/* A bitmask representing the available BARs */
	int bar_mask;
	/* An index value used to identify the MHI controller BAR */
	unsigned int mhi_bar_idx;
	/* An index value used to identify the DBCs BAR */
	unsigned int dbc_bar_idx;
};

static const struct qaic_device_config aic080_config = {
	.family = FAMILY_AIC100,
	.bar_mask = BIT(0) | BIT(2) | BIT(4),
	.mhi_bar_idx = 0,
	.dbc_bar_idx = 2,
};

static const struct qaic_device_config aic100_config = {
	.family = FAMILY_AIC100,
	.bar_mask = BIT(0) | BIT(2) | BIT(4),
	.mhi_bar_idx = 0,
	.dbc_bar_idx = 2,
};

static const struct qaic_device_config aic200_config = {
	.family = FAMILY_AIC200,
	.bar_mask = BIT(0) | BIT(1) | BIT(2) | BIT(4),
	.mhi_bar_idx = 1,
	.dbc_bar_idx = 2,
};

bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);

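/*
 * Managed-resource helpers: each qaicm_*_init() pairs its allocation with a
 * drmm release action, so the workqueues, SRCU structs, and PCI drvdata are
 * torn down automatically when the drm_device is released and callers need
 * no manual unwinding on their error paths.
 */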
static void qaicm_wq_release(struct drm_device *dev, void *res)
{
	struct workqueue_struct *wq = res;

	destroy_workqueue(wq);
}

static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *name)
{
	struct workqueue_struct *wq;
	int ret;

	wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
	if (ret)
		return ERR_PTR(ret);

	return wq;
}

static void qaicm_srcu_release(struct drm_device *dev, void *res)
{
	struct srcu_struct *lock = res;

	cleanup_srcu_struct(lock);
}

static int qaicm_srcu_init(struct drm_device *dev, struct srcu_struct *lock)
{
	int ret;

	ret = init_srcu_struct(lock);
	if (ret)
		return ret;

	return drmm_add_action_or_reset(dev, qaicm_srcu_release, lock);
}

static void qaicm_pci_release(struct drm_device *dev, void *res)
{
	struct qaic_device *qdev = to_qaic_device(dev);

	pci_set_drvdata(qdev->pdev, NULL);
}

static void free_usr(struct kref *kref)
{
	struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);

	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
	kfree(usr);
}

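/*
 * Per-FD open/close. qaic_open() runs under the qdev SRCU read lock so the
 * QAIC_ONLINE check stays meaningful while the new user is set up;
 * qaic_notify_reset() transitions dev_state and then waits in
 * synchronize_srcu() for such readers to finish.
 */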
static int qaic_open(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_drm_device *qddev = to_qaic_drm_device(dev);
	struct qaic_device *qdev = qddev->qdev;
	struct qaic_user *usr;
	int rcu_id;
	int ret;

	rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto dev_unlock;
	}

	usr = kmalloc(sizeof(*usr), GFP_KERNEL);
	if (!usr) {
		ret = -ENOMEM;
		goto dev_unlock;
	}

	usr->handle = ida_alloc(&qaic_usrs, GFP_KERNEL);
	if (usr->handle < 0) {
		ret = usr->handle;
		goto free_usr;
	}
	usr->qddev = qddev;
	atomic_set(&usr->chunk_id, 0);
	init_srcu_struct(&usr->qddev_lock);
	kref_init(&usr->ref_count);

	ret = mutex_lock_interruptible(&qddev->users_mutex);
	if (ret)
		goto cleanup_usr;

	list_add(&usr->node, &qddev->users);
	mutex_unlock(&qddev->users_mutex);

	file->driver_priv = usr;

	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return 0;

cleanup_usr:
	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
free_usr:
	kfree(usr);
dev_unlock:
	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return ret;
}

static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_user *usr = file->driver_priv;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	int qdev_rcu_id;
	int usr_rcu_id;
	int i;

	qddev = usr->qddev;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (qddev) {
		qdev = qddev->qdev;
		qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
		if (qdev->dev_state == QAIC_ONLINE) {
			qaic_release_usr(qdev, usr);
			for (i = 0; i < qdev->num_dbc; ++i)
				if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
					release_dbc(qdev, i);
		}
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);

		mutex_lock(&qddev->users_mutex);
		if (!list_empty(&usr->node))
			list_del_init(&usr->node);
		mutex_unlock(&qddev->users_mutex);
	}

	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	kref_put(&usr->ref_count, free_usr);

	file->driver_priv = NULL;
}

DEFINE_DRM_ACCEL_FOPS(qaic_accel_fops);

static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QAIC_MANAGE, qaic_manage_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_CREATE_BO, qaic_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_MMAP_BO, qaic_mmap_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_ATTACH_SLICE_BO, qaic_attach_slice_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_EXECUTE_BO, qaic_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0),
};

static const struct drm_driver qaic_accel_driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.name = QAIC_NAME,
	.desc = QAIC_DESC,

	.fops = &qaic_accel_fops,
	.open = qaic_open,
	.postclose = qaic_postclose,

	.ioctls = qaic_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(qaic_drm_ioctls),
	.gem_prime_import = qaic_gem_prime_import,
};

static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	int ret;

	/* Hold off implementing partitions until the uapi is determined */
	if (partition_id != QAIC_NO_PARTITION)
		return -EINVAL;

	qddev->partition_id = partition_id;

	ret = drm_dev_register(drm, 0);
	if (ret) {
		pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
		return ret;
	}

	ret = qaic_sysfs_init(qddev);
	if (ret) {
		drm_dev_unregister(drm);
		pci_dbg(qdev->pdev, "qaic_sysfs_init failed %d\n", ret);
		return ret;
	}

	qaic_debugfs_init(qddev);

	return ret;
}

static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	struct qaic_user *usr;

	qaic_sysfs_remove(qddev);
	drm_dev_unregister(drm);
	qddev->partition_id = 0;
	/*
	 * Existing users get unresolvable errors till they close FDs.
	 * Need to sync carefully with users calling close(). The
	 * list of users can be modified elsewhere when the lock isn't
	 * held here, but sync'ing the srcu with the mutex held
	 * could deadlock. Grab the mutex so that the list will be
	 * unmodified. The user we get will exist as long as the
	 * lock is held. Signal that the qddev is going away, and
	 * grab a reference to the user so they don't go away for
	 * synchronize_srcu(). Then release the mutex to avoid
	 * deadlock and make sure the user has observed the signal.
	 * With the lock released, we cannot maintain any state of the
	 * user list.
	 */
	mutex_lock(&qddev->users_mutex);
	while (!list_empty(&qddev->users)) {
		usr = list_first_entry(&qddev->users, struct qaic_user, node);
		list_del_init(&usr->node);
		kref_get(&usr->ref_count);
		usr->qddev = NULL;
		mutex_unlock(&qddev->users_mutex);
		synchronize_srcu(&usr->qddev_lock);
		kref_put(&usr->ref_count, free_usr);
		mutex_lock(&qddev->users_mutex);
	}
	mutex_unlock(&qddev->users_mutex);
}

static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	u16 major = -1, minor = -1;
	struct qaic_device *qdev;
	int ret;

	/*
	 * Invoking this function indicates that the control channel to the
	 * device is available. We use that as a signal to indicate that
	 * the device side firmware has booted. The device side firmware
	 * manages the device resources, so we need to communicate with it
	 * via the control channel in order to utilize the device. Therefore
	 * we wait until this signal to create the drm dev that userspace will
	 * use to control the device, because without the device side firmware,
	 * userspace can't do anything useful.
	 */

	qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));

	dev_set_drvdata(&mhi_dev->dev, qdev);
	qdev->cntl_ch = mhi_dev;

	ret = qaic_control_open(qdev);
	if (ret) {
		pci_dbg(qdev->pdev, "%s: control_open failed %d\n", __func__, ret);
		return ret;
	}

	qdev->dev_state = QAIC_BOOT;
	ret = get_cntl_version(qdev, NULL, &major, &minor);
	if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
		pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
			__func__, major, minor, CNTL_MAJOR, CNTL_MINOR, ret);
		ret = -EINVAL;
		goto close_control;
	}
	qdev->dev_state = QAIC_ONLINE;
	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);

	return ret;

close_control:
	qaic_control_close(qdev);
	return ret;
}

static void qaic_mhi_remove(struct mhi_device *mhi_dev)
{
/* This is redundant since we have already observed the device crash */
}

static void qaic_notify_reset(struct qaic_device *qdev)
{
	int i;

	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
	qdev->dev_state = QAIC_OFFLINE;
	/* wake up any waiters to avoid waiting for timeouts at sync */
	wake_all_cntl(qdev);
	for (i = 0; i < qdev->num_dbc; ++i)
		wakeup_dbc(qdev, i);
	synchronize_srcu(&qdev->dev_lock);
}

void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
{
	int i;

	qaic_notify_reset(qdev);

	/* start tearing things down */
	qaic_clean_up_ssr(qdev);
	for (i = 0; i < qdev->num_dbc; ++i)
		release_dbc(qdev, i);
}

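/*
 * Note: create_qdev() uses device-managed (devm) and drm-managed (drmm)
 * allocations throughout, so every error path below can simply return NULL
 * and rely on devm/drmm to unwind whatever was already set up.
 */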
static struct qaic_device *create_qdev(struct pci_dev *pdev,
				       const struct qaic_device_config *config)
{
	struct device *dev = &pdev->dev;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	struct drm_device *drm;
	int i, ret;

	qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return NULL;

	qdev->dev_state = QAIC_OFFLINE;
	qdev->num_dbc = 16;
	qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
	if (!qdev->dbc)
		return NULL;

	qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
	if (IS_ERR(qddev))
		return NULL;

	drm = to_drm(qddev);
	pci_set_drvdata(pdev, qdev);

	ret = drmm_mutex_init(drm, &qddev->users_mutex);
	if (ret)
		return NULL;
	ret = drmm_add_action_or_reset(drm, qaicm_pci_release, NULL);
	if (ret)
		return NULL;
	ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
	if (ret)
		return NULL;
	ret = drmm_mutex_init(drm, &qdev->bootlog_mutex);
	if (ret)
		return NULL;

	qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
	if (IS_ERR(qdev->cntl_wq))
		return NULL;
	qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
	if (IS_ERR(qdev->qts_wq))
		return NULL;
	qdev->ssr_wq = qaicm_wq_init(drm, "qaic_ssr");
	if (IS_ERR(qdev->ssr_wq))
		return NULL;

	ret = qaicm_srcu_init(drm, &qdev->dev_lock);
	if (ret)
		return NULL;

	ret = qaic_ssr_init(qdev, drm);
	if (ret)
		pci_info(pdev, "QAIC SSR crashdump collection not supported.\n");

	qdev->qddev = qddev;
	qdev->pdev = pdev;
	qddev->qdev = qdev;

	INIT_LIST_HEAD(&qdev->cntl_xfer_list);
	INIT_LIST_HEAD(&qdev->bootlog);
	INIT_LIST_HEAD(&qddev->users);

	for (i = 0; i < qdev->num_dbc; ++i) {
		spin_lock_init(&qdev->dbc[i].xfer_lock);
		qdev->dbc[i].qdev = qdev;
		qdev->dbc[i].id = i;
		INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
		ret = qaicm_srcu_init(drm, &qdev->dbc[i].ch_lock);
		if (ret)
			return NULL;
		init_waitqueue_head(&qdev->dbc[i].dbc_release);
		INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
		ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock);
		if (ret)
			return NULL;
	}

	return qdev;
}

static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev,
		    const struct qaic_device_config *config)
{
	int bars;
	int ret;

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;

	/* make sure the device has the expected BARs */
	if (bars != config->bar_mask) {
		pci_dbg(pdev, "%s: expected BARs %#x not found in device. Found %#x\n",
			__func__, config->bar_mask, bars);
		return -EINVAL;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	qdev->bar_mhi = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->mhi_bar_idx]);
	if (IS_ERR(qdev->bar_mhi))
		return PTR_ERR(qdev->bar_mhi);

	qdev->bar_dbc = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->dbc_bar_idx]);
	if (IS_ERR(qdev->bar_dbc))
		return PTR_ERR(qdev->bar_dbc);

	/* Managed release since we use pcim_enable_device above */
	pci_set_master(pdev);

	return 0;
}

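/*
 * Interrupt layout: vector 0 is the MHI interrupt and vectors 1..num_dbc
 * serve the DBCs. If the full set cannot be allocated, fall back to a
 * single shared vector for everything (see the single_msi handling below).
 */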
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int irq_count = qdev->num_dbc + 1;
	int mhi_irq;
	int ret;
	int i;

	/* Managed release since we use pcim_enable_device */
	ret = pci_alloc_irq_vectors(pdev, irq_count, irq_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret == -ENOSPC) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0)
			return ret;

		/*
		 * Operate in one MSI mode. All interrupts will be directed to
		 * MSI0; every interrupt will wake up all the interrupt handlers
		 * (MHI and DBC[0-15]). Since the interrupt is now shared, it is
		 * not disabled during DBC threaded handler, but only one thread
		 * will be allowed to run per DBC, so while it can be
		 * interrupted, it shouldn't race with itself.
		 */
		qdev->single_msi = true;
		pci_info(pdev, "Allocating %d MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n",
			 irq_count);
	} else if (ret < 0) {
		return ret;
	}

	mhi_irq = pci_irq_vector(pdev, 0);
	if (mhi_irq < 0)
		return mhi_irq;

	for (i = 0; i < qdev->num_dbc; ++i) {
		ret = devm_request_threaded_irq(&pdev->dev,
						pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1),
						dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
						"qaic_dbc", &qdev->dbc[i]);
		if (ret)
			return ret;

		if (datapath_polling) {
			qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
			if (!qdev->single_msi)
				disable_irq_nosync(qdev->dbc[i].irq);
			INIT_WORK(&qdev->dbc[i].poll_work, qaic_irq_polling_work);
		}
	}

	return mhi_irq;
}

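/*
 * Probe order: allocate the qaic_device, map the BARs and set the DMA mask,
 * wire up the MSI vectors, register the DRM/accel device, and finally hand
 * the MHI BAR to the MHI controller. Only the MHI registration needs an
 * explicit unwind; everything earlier is devm/drmm managed.
 */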
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qaic_device_config *config = (struct qaic_device_config *)id->driver_data;
	struct qaic_device *qdev;
	int mhi_irq;
	int ret;
	int i;

	qdev = create_qdev(pdev, config);
	if (!qdev)
		return -ENOMEM;

	ret = init_pci(qdev, pdev, config);
	if (ret)
		return ret;

	for (i = 0; i < qdev->num_dbc; ++i)
		qdev->dbc[i].dbc_base = qdev->bar_dbc + QAIC_DBC_OFF(i);

	mhi_irq = init_msi(qdev, pdev);
	if (mhi_irq < 0)
		return mhi_irq;

	ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
	if (ret)
		return ret;

	qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_mhi, mhi_irq,
						       qdev->single_msi, config->family);
	if (IS_ERR(qdev->mhi_cntrl)) {
		ret = PTR_ERR(qdev->mhi_cntrl);
		qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
		return ret;
	}

	return 0;
}

static void qaic_pci_remove(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	if (!qdev)
		return;

	qaic_dev_reset_clean_local_state(qdev);
	qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
}

static void qaic_pci_shutdown(struct pci_dev *pdev)
{
	/* see qaic_exit for what link_up is doing */
	link_up = true;
	qaic_pci_remove(pdev);
}

static pci_ers_result_t qaic_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

static void qaic_pci_reset_prepare(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_notify_reset(qdev);
	qaic_mhi_start_reset(qdev->mhi_cntrl);
	qaic_dev_reset_clean_local_state(qdev);
}

static void qaic_pci_reset_done(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_mhi_reset_done(qdev->mhi_cntrl);
}

static const struct mhi_device_id qaic_mhi_match_table[] = {
	{ .chan = "QAIC_CONTROL", },
	{},
};

static struct mhi_driver qaic_mhi_driver = {
	.id_table = qaic_mhi_match_table,
	.remove = qaic_mhi_remove,
	.probe = qaic_mhi_probe,
	.ul_xfer_cb = qaic_mhi_ul_xfer_cb,
	.dl_xfer_cb = qaic_mhi_dl_xfer_cb,
	.driver = {
		.name = "qaic_mhi",
	},
};

static const struct pci_device_id qaic_ids[] = {
	{ PCI_DEVICE_DATA(QCOM, AIC080, (kernel_ulong_t)&aic080_config), },
	{ PCI_DEVICE_DATA(QCOM, AIC100, (kernel_ulong_t)&aic100_config), },
	{ PCI_DEVICE_DATA(QCOM, AIC200, (kernel_ulong_t)&aic200_config), },
	{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);

static const struct pci_error_handlers qaic_pci_err_handler = {
	.error_detected = qaic_pci_error_detected,
	.reset_prepare = qaic_pci_reset_prepare,
	.reset_done = qaic_pci_reset_done,
};

static bool qaic_is_under_reset(struct qaic_device *qdev)
{
	int rcu_id;
	bool ret;

	rcu_id = srcu_read_lock(&qdev->dev_lock);
	ret = qdev->dev_state != QAIC_ONLINE;
	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return ret;
}

static bool qaic_data_path_busy(struct qaic_device *qdev)
{
	bool ret = false;
	int dev_rcu_id;
	int i;

	dev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		srcu_read_unlock(&qdev->dev_lock, dev_rcu_id);
		return false;
	}
	for (i = 0; i < qdev->num_dbc; i++) {
		struct dma_bridge_chan *dbc = &qdev->dbc[i];
		unsigned long flags;
		int ch_rcu_id;

		ch_rcu_id = srcu_read_lock(&dbc->ch_lock);
		if (!dbc->usr || !dbc->in_use) {
			srcu_read_unlock(&dbc->ch_lock, ch_rcu_id);
			continue;
		}
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		ret = !list_empty(&dbc->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		srcu_read_unlock(&dbc->ch_lock, ch_rcu_id);
		if (ret)
			break;
	}
	srcu_read_unlock(&qdev->dev_lock, dev_rcu_id);
	return ret;
}

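/*
 * System suspend policy: refuse with -EBUSY while any DBC has in-flight
 * transfers or the device is not online; otherwise quiesce the device
 * through the same path as a PCI reset before entering D3hot.
 */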
static int qaic_pm_suspend(struct device *dev)
{
	struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));

	dev_dbg(dev, "Suspending..\n");
	if (qaic_data_path_busy(qdev)) {
		dev_dbg(dev, "Device's datapath is busy. Aborting suspend..\n");
		return -EBUSY;
	}
	if (qaic_is_under_reset(qdev)) {
		dev_dbg(dev, "Device is under reset. Aborting suspend..\n");
		return -EBUSY;
	}
	qaic_mqts_ch_stop_timer(qdev->mqts_ch);
	qaic_pci_reset_prepare(qdev->pdev);
	pci_save_state(qdev->pdev);
	pci_disable_device(qdev->pdev);
	pci_set_power_state(qdev->pdev, PCI_D3hot);
	return 0;
}

static int qaic_pm_resume(struct device *dev)
{
	struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
	int ret;

	dev_dbg(dev, "Resuming..\n");
	pci_set_power_state(qdev->pdev, PCI_D0);
	pci_restore_state(qdev->pdev);
	ret = pci_enable_device(qdev->pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed on resume %d\n", ret);
		return ret;
	}
	pci_set_master(qdev->pdev);
	qaic_pci_reset_done(qdev->pdev);
	return 0;
}

static const struct dev_pm_ops qaic_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(qaic_pm_suspend, qaic_pm_resume)
};

static struct pci_driver qaic_pci_driver = {
	.name = QAIC_NAME,
	.id_table = qaic_ids,
	.probe = qaic_pci_probe,
	.remove = qaic_pci_remove,
	.shutdown = qaic_pci_shutdown,
	.err_handler = &qaic_pci_err_handler,
	.driver = {
		.pm = pm_sleep_ptr(&qaic_pm_ops),
	},
};

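/*
 * Module init: a failure registering the PCI, MHI, sahara, or SSR pieces
 * aborts the load and unwinds earlier registrations, while timesync,
 * bootlog, and RAS registration failures are only logged (pr_debug) and do
 * not abort module load.
 */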
static int __init qaic_init(void)
{
	int ret;

	ret = pci_register_driver(&qaic_pci_driver);
	if (ret) {
		pr_debug("qaic: pci_register_driver failed %d\n", ret);
		return ret;
	}

	ret = mhi_driver_register(&qaic_mhi_driver);
	if (ret) {
		pr_debug("qaic: mhi_driver_register failed %d\n", ret);
		goto free_pci;
	}

	ret = sahara_register();
	if (ret) {
		pr_debug("qaic: sahara_register failed %d\n", ret);
		goto free_mhi;
	}

	ret = qaic_timesync_init();
	if (ret)
		pr_debug("qaic: qaic_timesync_init failed %d\n", ret);

	ret = qaic_bootlog_register();
	if (ret)
		pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);

	ret = qaic_ras_register();
	if (ret)
		pr_debug("qaic: qaic_ras_register failed %d\n", ret);
	ret = qaic_ssr_register();
	if (ret) {
		pr_debug("qaic: qaic_ssr_register failed %d\n", ret);
		goto free_bootlog;
	}

	return 0;

free_bootlog:
	qaic_bootlog_unregister();
free_mhi:
	mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
	pci_unregister_driver(&qaic_pci_driver);
	return ret;
}

static void __exit qaic_exit(void)
{
	/*
	 * We assume that qaic_pci_remove() is called due to a hotplug event
	 * which would mean that the link is down, and thus
	 * qaic_mhi_free_controller() should not try to access the device during
	 * cleanup.
	 * We call pci_unregister_driver() below, which also triggers
	 * qaic_pci_remove(), but since this is module exit, we expect the link
	 * to the device to be up, in which case qaic_mhi_free_controller()
	 * should try to access the device during cleanup to put the device in
	 * a sane state.
	 * For that reason, we set link_up here to let qaic_mhi_free_controller
	 * know the expected link state. Since the module is going to be
	 * removed at the end of this, we don't need to worry about
	 * reinitializing the link_up state after the cleanup is done.
	 */
	link_up = true;
	qaic_ssr_unregister();
	qaic_ras_unregister();
	qaic_bootlog_unregister();
	qaic_timesync_deinit();
	sahara_unregister();
	mhi_driver_unregister(&qaic_mhi_driver);
	pci_unregister_driver(&qaic_pci_driver);
}

module_init(qaic_init);
module_exit(qaic_exit);

MODULE_AUTHOR(QAIC_DESC " Kernel Driver Team");
MODULE_DESCRIPTION(QAIC_DESC " Accel Driver");
MODULE_LICENSE("GPL");