// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <uapi/drm/qaic_accel.h>

#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_debugfs.h"
#include "qaic_ras.h"
#include "qaic_timesync.h"
#include "sahara.h"

MODULE_IMPORT_NS("DMA_BUF");

#define PCI_DEVICE_ID_QCOM_AIC080	0xa080
#define PCI_DEVICE_ID_QCOM_AIC100	0xa100
#define PCI_DEVICE_ID_QCOM_AIC200	0xa110
#define QAIC_NAME			"qaic"
#define QAIC_DESC			"Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR			5
#define CNTL_MINOR			0

struct qaic_device_config {
	/* Indicates the AIC family the device belongs to */
	int family;
	/* A bitmask representing the available BARs */
	int bar_mask;
	/* An index value used to identify the MHI controller BAR */
	unsigned int mhi_bar_idx;
	/* An index value used to identify the DBCs BAR */
	unsigned int dbc_bar_idx;
};

static const struct qaic_device_config aic080_config = {
	.family = FAMILY_AIC100,
	.bar_mask = BIT(0) | BIT(2) | BIT(4),
	.mhi_bar_idx = 0,
	.dbc_bar_idx = 2,
};

static const struct qaic_device_config aic100_config = {
	.family = FAMILY_AIC100,
	.bar_mask = BIT(0) | BIT(2) | BIT(4),
	.mhi_bar_idx = 0,
	.dbc_bar_idx = 2,
};

static const struct qaic_device_config aic200_config = {
	.family = FAMILY_AIC200,
	.bar_mask = BIT(0) | BIT(1) | BIT(2) | BIT(4),
	.mhi_bar_idx = 1,
	.dbc_bar_idx = 2,
};

bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);

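/*
 * DRM-managed resource helpers. Each *_init() below pairs its allocation
 * with a matching *_release() action via drmm_add_action_or_reset(), so the
 * workqueue and SRCU state are torn down automatically when the drm_device
 * is released.
 */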
static void qaicm_wq_release(struct drm_device *dev, void *res)
{
	struct workqueue_struct *wq = res;

	destroy_workqueue(wq);
}

static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *name)
{
	struct workqueue_struct *wq;
	int ret;

	wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
	if (ret)
		return ERR_PTR(ret);

	return wq;
}

static void qaicm_srcu_release(struct drm_device *dev, void *res)
{
	struct srcu_struct *lock = res;

	cleanup_srcu_struct(lock);
}

static int qaicm_srcu_init(struct drm_device *dev, struct srcu_struct *lock)
{
	int ret;

	ret = init_srcu_struct(lock);
	if (ret)
		return ret;

	return drmm_add_action_or_reset(dev, qaicm_srcu_release, lock);
}

static void qaicm_pci_release(struct drm_device *dev, void *res)
{
	struct qaic_device *qdev = to_qaic_device(dev);

	pci_set_drvdata(qdev->pdev, NULL);
}

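/* kref release callback: tears down a qaic_user once its last reference is dropped */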
static void free_usr(struct kref *kref)
{
	struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);

	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
	kfree(usr);
}

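/*
 * DRM open hook. Allocates the per-FD qaic_user and adds it to the device's
 * user list. Holding the SRCU read section on dev_lock keeps reset/teardown
 * paths (which call synchronize_srcu()) from completing while the open is in
 * flight.
 */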
static int qaic_open(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_drm_device *qddev = to_qaic_drm_device(dev);
	struct qaic_device *qdev = qddev->qdev;
	struct qaic_user *usr;
	int rcu_id;
	int ret;

	rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto dev_unlock;
	}

	usr = kmalloc(sizeof(*usr), GFP_KERNEL);
	if (!usr) {
		ret = -ENOMEM;
		goto dev_unlock;
	}

	usr->handle = ida_alloc(&qaic_usrs, GFP_KERNEL);
	if (usr->handle < 0) {
		ret = usr->handle;
		goto free_usr;
	}
	usr->qddev = qddev;
	atomic_set(&usr->chunk_id, 0);
	init_srcu_struct(&usr->qddev_lock);
	kref_init(&usr->ref_count);

	ret = mutex_lock_interruptible(&qddev->users_mutex);
	if (ret)
		goto cleanup_usr;

	list_add(&usr->node, &qddev->users);
	mutex_unlock(&qddev->users_mutex);

	file->driver_priv = usr;

	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return 0;

cleanup_usr:
	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
free_usr:
	kfree(usr);
dev_unlock:
	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return ret;
}

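/*
 * DRM postclose hook. Releases any DBCs still attached to this user, removes
 * the user from the device list, and drops the FD's reference. usr->qddev may
 * already be NULL if qaic_destroy_drm_device() detached the user first.
 */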
static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_user *usr = file->driver_priv;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	int qdev_rcu_id;
	int usr_rcu_id;
	int i;

	qddev = usr->qddev;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (qddev) {
		qdev = qddev->qdev;
		qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
		if (qdev->dev_state == QAIC_ONLINE) {
			qaic_release_usr(qdev, usr);
			for (i = 0; i < qdev->num_dbc; ++i)
				if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
					release_dbc(qdev, i);
		}
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);

		mutex_lock(&qddev->users_mutex);
		if (!list_empty(&usr->node))
			list_del_init(&usr->node);
		mutex_unlock(&qddev->users_mutex);
	}

	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	kref_put(&usr->ref_count, free_usr);

	file->driver_priv = NULL;
}

DEFINE_DRM_ACCEL_FOPS(qaic_accel_fops);

static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QAIC_MANAGE, qaic_manage_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_CREATE_BO, qaic_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_MMAP_BO, qaic_mmap_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_ATTACH_SLICE_BO, qaic_attach_slice_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_EXECUTE_BO, qaic_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0),
};

static const struct drm_driver qaic_accel_driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.name = QAIC_NAME,
	.desc = QAIC_DESC,

	.fops = &qaic_accel_fops,
	.open = qaic_open,
	.postclose = qaic_postclose,

	.ioctls = qaic_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(qaic_drm_ioctls),
	.gem_prime_import = qaic_gem_prime_import,
};

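/* Registers the (already allocated) drm_device with the DRM core and sets up debugfs */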
static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	int ret;

	/* Hold off implementing partitions until the uapi is determined */
	if (partition_id != QAIC_NO_PARTITION)
		return -EINVAL;

	qddev->partition_id = partition_id;

	ret = drm_dev_register(drm, 0);
	if (ret) {
		pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
		return ret;
	}

	qaic_debugfs_init(qddev);

	return ret;
}

static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	struct qaic_user *usr;

	drm_dev_unregister(drm);
	qddev->partition_id = 0;
	/*
	 * Existing users get unresolvable errors until they close their FDs.
	 * Need to sync carefully with users calling close(). The list of
	 * users can be modified elsewhere when the lock isn't held here, but
	 * syncing the SRCU with the mutex held could deadlock. Grab the mutex
	 * so that the list will be unmodified. The user we get will exist as
	 * long as the lock is held. Signal that the qddev is going away, and
	 * grab a reference to the user so they don't go away for
	 * synchronize_srcu(). Then release the mutex to avoid deadlock and
	 * make sure the user has observed the signal. With the lock released,
	 * we cannot maintain any state of the user list.
	 */
	mutex_lock(&qddev->users_mutex);
	while (!list_empty(&qddev->users)) {
		usr = list_first_entry(&qddev->users, struct qaic_user, node);
		list_del_init(&usr->node);
		kref_get(&usr->ref_count);
		usr->qddev = NULL;
		mutex_unlock(&qddev->users_mutex);
		synchronize_srcu(&usr->qddev_lock);
		kref_put(&usr->ref_count, free_usr);
		mutex_lock(&qddev->users_mutex);
	}
	mutex_unlock(&qddev->users_mutex);
}

static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	u16 major = -1, minor = -1;
	struct qaic_device *qdev;
	int ret;

	/*
	 * Invoking this function indicates that the control channel to the
	 * device is available. We use that as a signal to indicate that
	 * the device side firmware has booted. The device side firmware
	 * manages the device resources, so we need to communicate with it
	 * via the control channel in order to utilize the device. Therefore
	 * we wait until this signal to create the drm dev that userspace will
	 * use to control the device, because without the device side firmware,
	 * userspace can't do anything useful.
	 */

	qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));

	dev_set_drvdata(&mhi_dev->dev, qdev);
	qdev->cntl_ch = mhi_dev;

	ret = qaic_control_open(qdev);
	if (ret) {
		pci_dbg(qdev->pdev, "%s: control_open failed %d\n", __func__, ret);
		return ret;
	}

	qdev->dev_state = QAIC_BOOT;
	ret = get_cntl_version(qdev, NULL, &major, &minor);
	if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
		pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
			__func__, major, minor, CNTL_MAJOR, CNTL_MINOR, ret);
		ret = -EINVAL;
		goto close_control;
	}
	qdev->dev_state = QAIC_ONLINE;
	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);

	return ret;

close_control:
	qaic_control_close(qdev);
	return ret;
}

static void qaic_mhi_remove(struct mhi_device *mhi_dev)
{
	/* This is redundant since we have already observed the device crash */
}

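/*
 * Broadcast that the device has gone away: flip dev_state to QAIC_OFFLINE,
 * wake all control and DBC waiters, then wait for every SRCU reader of
 * dev_lock to have observed the new state.
 */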
static void qaic_notify_reset(struct qaic_device *qdev)
{
	int i;

	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
	qdev->dev_state = QAIC_OFFLINE;
	/* wake up any waiters to avoid waiting for timeouts at sync */
	wake_all_cntl(qdev);
	for (i = 0; i < qdev->num_dbc; ++i)
		wakeup_dbc(qdev, i);
	synchronize_srcu(&qdev->dev_lock);
}

void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
{
	int i;

	qaic_notify_reset(qdev);

	/* start tearing things down */
	for (i = 0; i < qdev->num_dbc; ++i)
		release_dbc(qdev, i);
}

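/*
 * Allocate and initialize the qaic_device and its embedded DRM device. All
 * resources here are devm/drmm managed, so error paths simply return NULL
 * and rely on managed cleanup.
 */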
static struct qaic_device *create_qdev(struct pci_dev *pdev,
				       const struct qaic_device_config *config)
{
	struct device *dev = &pdev->dev;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	struct drm_device *drm;
	int i, ret;

	qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return NULL;

	qdev->dev_state = QAIC_OFFLINE;
	qdev->num_dbc = 16;
	qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
	if (!qdev->dbc)
		return NULL;

	qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
	if (IS_ERR(qddev))
		return NULL;

	drm = to_drm(qddev);
	pci_set_drvdata(pdev, qdev);

	ret = drmm_mutex_init(drm, &qddev->users_mutex);
	if (ret)
		return NULL;
	ret = drmm_add_action_or_reset(drm, qaicm_pci_release, NULL);
	if (ret)
		return NULL;
	ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
	if (ret)
		return NULL;
	ret = drmm_mutex_init(drm, &qdev->bootlog_mutex);
	if (ret)
		return NULL;

	qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
	if (IS_ERR(qdev->cntl_wq))
		return NULL;
	qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
	if (IS_ERR(qdev->qts_wq))
		return NULL;

	ret = qaicm_srcu_init(drm, &qdev->dev_lock);
	if (ret)
		return NULL;

	qdev->qddev = qddev;
	qdev->pdev = pdev;
	qddev->qdev = qdev;

	INIT_LIST_HEAD(&qdev->cntl_xfer_list);
	INIT_LIST_HEAD(&qdev->bootlog);
	INIT_LIST_HEAD(&qddev->users);

	for (i = 0; i < qdev->num_dbc; ++i) {
		spin_lock_init(&qdev->dbc[i].xfer_lock);
		qdev->dbc[i].qdev = qdev;
		qdev->dbc[i].id = i;
		INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
		ret = qaicm_srcu_init(drm, &qdev->dbc[i].ch_lock);
		if (ret)
			return NULL;
		init_waitqueue_head(&qdev->dbc[i].dbc_release);
		INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
	}

	return qdev;
}

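/*
 * Validate the BAR layout against the per-family config, enable the device
 * (managed), set the 64-bit DMA mask, and map the MHI and DBC BARs.
 */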
static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev,
		    const struct qaic_device_config *config)
{
	int bars;
	int ret;

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;

	/* make sure the device has the expected BARs */
	if (bars != config->bar_mask) {
		pci_dbg(pdev, "%s: expected BARs %#x not found in device. Found %#x\n",
			__func__, config->bar_mask, bars);
		return -EINVAL;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	qdev->bar_mhi = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->mhi_bar_idx]);
	if (IS_ERR(qdev->bar_mhi))
		return PTR_ERR(qdev->bar_mhi);

	qdev->bar_dbc = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->dbc_bar_idx]);
	if (IS_ERR(qdev->bar_dbc))
		return PTR_ERR(qdev->bar_dbc);

	/* Managed release since we use pcim_enable_device above */
	pci_set_master(pdev);

	return 0;
}

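/*
 * Request one vector for MHI plus one per DBC. If the full set is not
 * available, fall back to a single shared MSI for everything, at a potential
 * performance cost. Returns the MHI IRQ number on success.
 */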
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int irq_count = qdev->num_dbc + 1;
	int mhi_irq;
	int ret;
	int i;

	/* Managed release since we use pcim_enable_device */
	ret = pci_alloc_irq_vectors(pdev, irq_count, irq_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret == -ENOSPC) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0)
			return ret;

		/*
		 * Operate in one MSI mode. All interrupts will be directed to
		 * MSI0; every interrupt will wake up all the interrupt handlers
		 * (MHI and DBC[0-15]). Since the interrupt is now shared, it is
		 * not disabled during DBC threaded handler, but only one thread
		 * will be allowed to run per DBC, so while it can be
		 * interrupted, it shouldn't race with itself.
		 */
		qdev->single_msi = true;
		pci_info(pdev, "Allocating %d MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n",
			 irq_count);
	} else if (ret < 0) {
		return ret;
	}

	mhi_irq = pci_irq_vector(pdev, 0);
	if (mhi_irq < 0)
		return mhi_irq;

	for (i = 0; i < qdev->num_dbc; ++i) {
		ret = devm_request_threaded_irq(&pdev->dev,
						pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1),
						dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
						"qaic_dbc", &qdev->dbc[i]);
		if (ret)
			return ret;

		if (datapath_polling) {
			qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
			if (!qdev->single_msi)
				disable_irq_nosync(qdev->dbc[i].irq);
			INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
		}
	}

	return mhi_irq;
}

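/*
 * PCI probe: build the qaic_device, map BARs, set up interrupts, register the
 * DRM accel node, and hand the MHI BAR over to the MHI controller. The device
 * stays QAIC_OFFLINE until the control channel comes up in qaic_mhi_probe().
 */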
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qaic_device_config *config = (struct qaic_device_config *)id->driver_data;
	struct qaic_device *qdev;
	int mhi_irq;
	int ret;
	int i;

	qdev = create_qdev(pdev, config);
	if (!qdev)
		return -ENOMEM;

	ret = init_pci(qdev, pdev, config);
	if (ret)
		return ret;

	for (i = 0; i < qdev->num_dbc; ++i)
		qdev->dbc[i].dbc_base = qdev->bar_dbc + QAIC_DBC_OFF(i);

	mhi_irq = init_msi(qdev, pdev);
	if (mhi_irq < 0)
		return mhi_irq;

	ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
	if (ret)
		return ret;

	qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_mhi, mhi_irq,
						       qdev->single_msi, config->family);
	if (IS_ERR(qdev->mhi_cntrl)) {
		ret = PTR_ERR(qdev->mhi_cntrl);
		qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
		return ret;
	}

	return 0;
}

static void qaic_pci_remove(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	if (!qdev)
		return;

	qaic_dev_reset_clean_local_state(qdev);
	qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
}

static void qaic_pci_shutdown(struct pci_dev *pdev)
{
	/* see qaic_exit for what link_up is doing */
	link_up = true;
	qaic_pci_remove(pdev);
}

static pci_ers_result_t qaic_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

static void qaic_pci_reset_prepare(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_notify_reset(qdev);
	qaic_mhi_start_reset(qdev->mhi_cntrl);
	qaic_dev_reset_clean_local_state(qdev);
}

static void qaic_pci_reset_done(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_mhi_reset_done(qdev->mhi_cntrl);
}

static const struct mhi_device_id qaic_mhi_match_table[] = {
	{ .chan = "QAIC_CONTROL", },
	{},
};

static struct mhi_driver qaic_mhi_driver = {
	.id_table = qaic_mhi_match_table,
	.remove = qaic_mhi_remove,
	.probe = qaic_mhi_probe,
	.ul_xfer_cb = qaic_mhi_ul_xfer_cb,
	.dl_xfer_cb = qaic_mhi_dl_xfer_cb,
	.driver = {
		.name = "qaic_mhi",
	},
};

static const struct pci_device_id qaic_ids[] = {
	{ PCI_DEVICE_DATA(QCOM, AIC080, (kernel_ulong_t)&aic080_config), },
	{ PCI_DEVICE_DATA(QCOM, AIC100, (kernel_ulong_t)&aic100_config), },
	{ PCI_DEVICE_DATA(QCOM, AIC200, (kernel_ulong_t)&aic200_config), },
	{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);

static const struct pci_error_handlers qaic_pci_err_handler = {
	.error_detected = qaic_pci_error_detected,
	.reset_prepare = qaic_pci_reset_prepare,
	.reset_done = qaic_pci_reset_done,
};

static struct pci_driver qaic_pci_driver = {
	.name = QAIC_NAME,
	.id_table = qaic_ids,
	.probe = qaic_pci_probe,
	.remove = qaic_pci_remove,
	.shutdown = qaic_pci_shutdown,
	.err_handler = &qaic_pci_err_handler,
};

static int __init qaic_init(void)
{
	int ret;

	ret = pci_register_driver(&qaic_pci_driver);
	if (ret) {
		pr_debug("qaic: pci_register_driver failed %d\n", ret);
		return ret;
	}

	ret = mhi_driver_register(&qaic_mhi_driver);
	if (ret) {
		pr_debug("qaic: mhi_driver_register failed %d\n", ret);
		goto free_pci;
	}

	ret = sahara_register();
	if (ret) {
		pr_debug("qaic: sahara_register failed %d\n", ret);
		goto free_mhi;
	}

	ret = qaic_timesync_init();
	if (ret)
		pr_debug("qaic: qaic_timesync_init failed %d\n", ret);

	ret = qaic_bootlog_register();
	if (ret)
		pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);

	ret = qaic_ras_register();
	if (ret)
		pr_debug("qaic: qaic_ras_register failed %d\n", ret);

	return 0;

free_mhi:
	mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
	pci_unregister_driver(&qaic_pci_driver);
	return ret;
}

static void __exit qaic_exit(void)
{
	/*
	 * We assume that qaic_pci_remove() is called due to a hotplug event
	 * which would mean that the link is down, and thus
	 * qaic_mhi_free_controller() should not try to access the device
	 * during cleanup.
	 * We call pci_unregister_driver() below, which also triggers
	 * qaic_pci_remove(), but since this is module exit, we expect the link
	 * to the device to be up, in which case qaic_mhi_free_controller()
	 * should try to access the device during cleanup to put the device in
	 * a sane state.
	 * For that reason, we set link_up here to let qaic_mhi_free_controller
	 * know the expected link state. Since the module is going to be
	 * removed at the end of this, we don't need to worry about
	 * reinitializing the link_up state after the cleanup is done.
	 */
	link_up = true;
	qaic_ras_unregister();
	qaic_bootlog_unregister();
	qaic_timesync_deinit();
	sahara_unregister();
	mhi_driver_unregister(&qaic_mhi_driver);
	pci_unregister_driver(&qaic_pci_driver);
}

module_init(qaic_init);
module_exit(qaic_exit);

MODULE_AUTHOR(QAIC_DESC " Kernel Driver Team");
MODULE_DESCRIPTION(QAIC_DESC " Accel Driver");
MODULE_LICENSE("GPL");