GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/idxd/init.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS("IDXD");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);
static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
		.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
		.cr_status_off = offsetof(struct dsa_completion_record, status),
		.cr_result_off = offsetof(struct dsa_completion_record, result),
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
		.evl_cr_off = offsetof(struct iax_evl_entry, cr),
		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
		.cr_status_off = offsetof(struct iax_completion_record, status),
		.cr_result_off = offsetof(struct iax_completion_record, error_code),
		.load_device_defaults = idxd_load_iaa_device_defaults,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
	/* DSA on GNR-D platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) },
	/* DSA on DMR platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	/* IAA on DMR platforms */
	{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
	/* IAA PTL platforms */
	{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = IOMMU_PASID_INVALID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}
static void idxd_clean_wqs(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (idxd->hw.wq_cap.op_config)
			bitmap_free(wq->opcap_bmap);
		kfree(wq->wqcfg);
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
		kfree(wq);
	}
	bitmap_free(idxd->wq_enable_map);
	kfree(idxd->wqs);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wq_enable_map) {
		rc = -ENOMEM;
		goto err_bitmap;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0)
			goto err;

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			rc = -ENOMEM;
			goto err;
		}

		if (idxd->hw.wq_cap.op_config) {
			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
			if (!wq->opcap_bmap) {
				rc = -ENOMEM;
				goto err_opcap_bmap;
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		mutex_init(&wq->uc_lock);
		xa_init(&wq->upasid_xa);
		idxd->wqs[i] = wq;
	}

	return 0;

err_opcap_bmap:
	kfree(wq->wqcfg);

err:
	put_device(conf_dev);
	kfree(wq);

	while (--i >= 0) {
		wq = idxd->wqs[i];
		if (idxd->hw.wq_cap.op_config)
			bitmap_free(wq->opcap_bmap);
		kfree(wq->wqcfg);
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
		kfree(wq);
	}
	bitmap_free(idxd->wq_enable_map);

err_bitmap:
	kfree(idxd->wqs);

	return rc;
}
static void idxd_clean_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *conf_dev;
	int i;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
		kfree(engine);
	}
	kfree(idxd->engines);
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			kfree(engine);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
		kfree(engine);
	}
	kfree(idxd->engines);

	return rc;
}
static void idxd_clean_groups(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
		kfree(group);
	}
	kfree(idxd->groups);
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			kfree(group);
			goto err;
		}

		idxd->groups[i] = group;
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
	}

	return 0;

err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
		kfree(group);
	}
	kfree(idxd->groups);

	return rc;
}
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	idxd_clean_groups(idxd);
	idxd_clean_engines(idxd);
	idxd_clean_wqs(idxd);
	destroy_workqueue(idxd->wq);
}

static int idxd_init_evl(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	unsigned int evl_cache_size;
	struct idxd_evl *evl;
	const char *idxd_name;

	if (idxd->hw.gen_cap.evl_support == 0)
		return 0;

	evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
	if (!evl)
		return -ENOMEM;

	mutex_init(&evl->lock);
	evl->size = IDXD_EVL_SIZE_MIN;

	idxd_name = dev_name(idxd_confdev(idxd));
	evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
	/*
	 * Since completion record in evl_cache will be copied to user
	 * when handling completion record page fault, need to create
	 * the cache suitable for user copy.
	 */
	idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
						     0, 0, 0, evl_cache_size,
						     NULL);
	if (!idxd->evl_cache) {
		kfree(evl);
		return -ENOMEM;
	}

	idxd->evl = evl;
	return 0;
}
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	rc = idxd_init_evl(idxd);
	if (rc < 0)
		goto err_evl;

	return 0;

err_evl:
	destroy_workqueue(idxd->wq);
err_wkq_create:
	idxd_clean_groups(idxd);
err_group:
	idxd_clean_engines(idxd);
err_engine:
	idxd_clean_wqs(idxd);
err_wqs:
	return rc;
}
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

	for (i = 0, nr = 0; i < count; i++) {
		for (j = 0; j < BITS_PER_LONG_LONG; j++) {
			if (val[i] & BIT(j))
				set_bit(nr, bmap);
			nr++;
		}
	}
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
	idxd->nr_rdbufs = idxd->max_rdbufs;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
	multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);

	/* read iaa cap */
	if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}
static void idxd_free(struct idxd_device *idxd)
{
	if (!idxd)
		return;

	put_device(idxd_confdev(idxd));
	bitmap_free(idxd->opcap_bmap);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		goto err_ida;

	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->opcap_bmap)
		goto err_opcap;

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0)
		goto err_name;

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;

err_name:
	put_device(conf_dev);
	bitmap_free(idxd->opcap_bmap);
err_opcap:
	ida_free(&idxd_ida, idxd->id);
err_ida:
	kfree(idxd);

	return NULL;
}
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;
	ioasid_t pasid;
	int ret;

	/*
	 * Attach a global PASID to the DMA domain so that we can use ENQCMDS
	 * to submit work on buffers mapped by DMA API.
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return -EPERM;

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	/*
	 * DMA domain is owned by the driver, it should support all valid
	 * types such as DMA-FQ, identity, etc.
	 */
	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
	if (ret) {
		dev_err(dev, "failed to attach device pasid %d, domain type %d",
			pasid, domain->type);
		iommu_free_global_pasid(pasid);
		return ret;
	}

	/* Since we set user privilege for kernel DMA, enable completion IRQ */
	idxd_set_user_intr(idxd, 1);
	idxd->pasid = pasid;

	return ret;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return;

	iommu_detach_device_pasid(domain, dev, idxd->pasid);
	iommu_free_global_pasid(idxd->pasid);

	idxd_set_user_intr(idxd, 0);
	idxd->sva = NULL;
	idxd->pasid = IOMMU_PASID_INVALID;
}
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

		rc = idxd_enable_system_pasid(idxd);
		if (rc)
			dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
		else
			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

err_config:
	idxd_cleanup_internals(idxd);
err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
}
/*
 * Attach IDXD device to IDXD driver.
 */
static int idxd_bind(struct device_driver *drv, const char *buf)
{
	const struct bus_type *bus = drv->bus;
	struct device *dev;
	int err = -ENODEV;

	dev = bus_find_device_by_name(bus, NULL, buf);
	if (dev)
		err = device_driver_attach(drv, dev);

	put_device(dev);

	return err;
}

/*
 * Detach IDXD device from driver.
 */
static void idxd_unbind(struct device_driver *drv, const char *buf)
{
	const struct bus_type *bus = drv->bus;
	struct device *dev;

	dev = bus_find_device_by_name(bus, NULL, buf);
	if (dev && dev->driver == drv)
		device_release_driver(dev);

	put_device(dev);
}

#define idxd_free_saved_configs(saved_configs, count)	\
	do {						\
		int i;					\
							\
		for (i = 0; i < (count); i++)		\
			kfree(saved_configs[i]);	\
	} while (0)

static void idxd_free_saved(struct idxd_group **saved_groups,
			    struct idxd_engine **saved_engines,
			    struct idxd_wq **saved_wqs,
			    struct idxd_device *idxd)
{
	if (saved_groups)
		idxd_free_saved_configs(saved_groups, idxd->max_groups);
	if (saved_engines)
		idxd_free_saved_configs(saved_engines, idxd->max_engines);
	if (saved_wqs)
		idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
}
/*
 * Save IDXD device configurations including engines, groups, wqs etc.
 * The saved configurations can be restored when needed.
 */
static int idxd_device_config_save(struct idxd_device *idxd,
				   struct idxd_saved_states *idxd_saved)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));

	if (idxd->evl) {
		memcpy(&idxd_saved->saved_evl, idxd->evl,
		       sizeof(struct idxd_evl));
	}

	struct idxd_group **saved_groups __free(kfree) =
			kcalloc_node(idxd->max_groups,
				     sizeof(struct idxd_group *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!saved_groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *saved_group __free(kfree) =
				kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
					     dev_to_node(dev));

		if (!saved_group) {
			/* Free saved groups */
			idxd_free_saved(saved_groups, NULL, NULL, idxd);

			return -ENOMEM;
		}

		memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
		saved_groups[i] = no_free_ptr(saved_group);
	}

	struct idxd_engine **saved_engines =
			kcalloc_node(idxd->max_engines,
				     sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!saved_engines) {
		/* Free saved groups */
		idxd_free_saved(saved_groups, NULL, NULL, idxd);

		return -ENOMEM;
	}
	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *saved_engine __free(kfree) =
				kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
					     dev_to_node(dev));
		if (!saved_engine) {
			/* Free saved groups and engines */
			idxd_free_saved(saved_groups, saved_engines, NULL,
					idxd);

			return -ENOMEM;
		}

		memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
		saved_engines[i] = no_free_ptr(saved_engine);
	}

	unsigned long *saved_wq_enable_map __free(bitmap) =
			bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
					   dev_to_node(dev));
	if (!saved_wq_enable_map) {
		/* Free saved groups and engines */
		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);

		return -ENOMEM;
	}

	bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);

	struct idxd_wq **saved_wqs __free(kfree) =
			kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!saved_wqs) {
		/* Free saved groups and engines */
		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);

		return -ENOMEM;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *saved_wq __free(kfree) =
				kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
					     dev_to_node(dev));
		struct idxd_wq *wq;

		if (!saved_wq) {
			/* Free saved groups, engines, and wqs */
			idxd_free_saved(saved_groups, saved_engines, saved_wqs,
					idxd);

			return -ENOMEM;
		}

		if (!test_bit(i, saved_wq_enable_map))
			continue;

		wq = idxd->wqs[i];
		mutex_lock(&wq->wq_lock);
		memcpy(saved_wq, wq, sizeof(*saved_wq));
		saved_wqs[i] = no_free_ptr(saved_wq);
		mutex_unlock(&wq->wq_lock);
	}

	/* Save configurations */
	idxd_saved->saved_groups = no_free_ptr(saved_groups);
	idxd_saved->saved_engines = no_free_ptr(saved_engines);
	idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
	idxd_saved->saved_wqs = no_free_ptr(saved_wqs);

	return 0;
}
/*
 * Restore IDXD device configurations including engines, groups, wqs etc
 * that were saved before.
 */
static void idxd_device_config_restore(struct idxd_device *idxd,
				       struct idxd_saved_states *idxd_saved)
{
	struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
	int i;

	idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;

	idxd->evl->size = saved_evl->size;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *saved_group, *group;

		saved_group = idxd_saved->saved_groups[i];
		group = idxd->groups[i];

		group->rdbufs_allowed = saved_group->rdbufs_allowed;
		group->rdbufs_reserved = saved_group->rdbufs_reserved;
		group->tc_a = saved_group->tc_a;
		group->tc_b = saved_group->tc_b;
		group->use_rdbuf_limit = saved_group->use_rdbuf_limit;

		kfree(saved_group);
	}
	kfree(idxd_saved->saved_groups);

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *saved_engine, *engine;

		saved_engine = idxd_saved->saved_engines[i];
		engine = idxd->engines[i];

		engine->group = saved_engine->group;

		kfree(saved_engine);
	}
	kfree(idxd_saved->saved_engines);

	bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
		    idxd->max_wqs);
	bitmap_free(idxd_saved->saved_wq_enable_map);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *saved_wq, *wq;
		size_t len;

		if (!test_bit(i, idxd->wq_enable_map))
			continue;

		saved_wq = idxd_saved->saved_wqs[i];
		wq = idxd->wqs[i];

		mutex_lock(&wq->wq_lock);

		wq->group = saved_wq->group;
		wq->flags = saved_wq->flags;
		wq->threshold = saved_wq->threshold;
		wq->size = saved_wq->size;
		wq->priority = saved_wq->priority;
		wq->type = saved_wq->type;
		len = strlen(saved_wq->name) + 1;
		strscpy(wq->name, saved_wq->name, len);
		wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
		wq->max_batch_size = saved_wq->max_batch_size;
		wq->enqcmds_retries = saved_wq->enqcmds_retries;
		wq->descs = saved_wq->descs;
		wq->idxd_chan = saved_wq->idxd_chan;
		len = strlen(saved_wq->driver_name) + 1;
		strscpy(wq->driver_name, saved_wq->driver_name, len);

		mutex_unlock(&wq->wq_lock);

		kfree(saved_wq);
	}

	kfree(idxd_saved->saved_wqs);
}
static void idxd_reset_prepare(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct device *dev = &idxd->pdev->dev;
	const char *idxd_name;
	int rc;

	idxd_name = dev_name(idxd_confdev(idxd));

	struct idxd_saved_states *idxd_saved __free(kfree) =
			kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
				     dev_to_node(&pdev->dev));
	if (!idxd_saved) {
		dev_err(dev, "HALT: no memory\n");

		return;
	}

	/* Save IDXD configurations. */
	rc = idxd_device_config_save(idxd, idxd_saved);
	if (rc < 0) {
		dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);

		return;
	}

	idxd->idxd_saved = no_free_ptr(idxd_saved);

	/* Save PCI device state. */
	pci_save_state(idxd->pdev);
}

static void idxd_reset_done(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	const char *idxd_name;
	struct device *dev;
	int rc, i;

	if (!idxd->idxd_saved)
		return;

	dev = &idxd->pdev->dev;
	idxd_name = dev_name(idxd_confdev(idxd));

	/* Restore PCI device state. */
	pci_restore_state(idxd->pdev);

	/* Unbind idxd device from driver. */
	idxd_unbind(&idxd_drv.drv, idxd_name);

	/*
	 * Probe PCI device without allocating or changing
	 * idxd software data which keeps the same as before FLR.
	 */
	idxd_pci_probe_alloc(idxd, NULL, NULL);

	/* Restore IDXD configurations. */
	idxd_device_config_restore(idxd, idxd->idxd_saved);

	/* Re-configure IDXD device if allowed. */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			dev_err(dev, "HALT: %s config fails\n", idxd_name);
			goto out;
		}
	}

	/* Bind IDXD device to driver. */
	rc = idxd_bind(&idxd_drv.drv, idxd_name);
	if (rc < 0) {
		dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
		goto out;
	}

	/* Bind enabled wq in the IDXD device to driver. */
	for (i = 0; i < idxd->max_wqs; i++) {
		if (test_bit(i, idxd->wq_enable_map)) {
			struct idxd_wq *wq = idxd->wqs[i];
			char wq_name[32];

			wq->state = IDXD_WQ_DISABLED;
			sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
			/*
			 * Bind to user driver depending on wq type.
			 *
			 * Currently only support user type WQ. Will support
			 * kernel type WQ in the future.
			 */
			if (wq->type == IDXD_WQT_USER)
				rc = idxd_bind(&idxd_user_drv.drv, wq_name);
			else
				rc = -EINVAL;
			if (rc < 0) {
				clear_bit(i, idxd->wq_enable_map);
				dev_err(dev,
					"HALT: unable to re-enable wq %s\n",
					dev_name(wq_confdev(wq)));
			}
		}
	}
out:
	kfree(idxd->idxd_saved);
}

static const struct pci_error_handlers idxd_error_handler = {
	.reset_prepare = idxd_reset_prepare,
	.reset_done = idxd_reset_done,
};
/*
 * Probe idxd PCI device.
 * If idxd is not given, need to allocate idxd and set up its data.
 *
 * If idxd is given, idxd was allocated and setup already. Just need to
 * configure device without re-allocating and re-configuring idxd data.
 * This is useful for recovering from FLR.
 */
int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	bool alloc_idxd = idxd ? false : true;
	struct idxd_driver_data *data;
	struct device *dev;
	int rc;

	pdev = idxd ? idxd->pdev : pdev;
	dev = &pdev->dev;
	data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	if (alloc_idxd) {
		dev_dbg(dev, "Alloc IDXD context\n");
		idxd = idxd_alloc(pdev, data);
		if (!idxd) {
			rc = -ENOMEM;
			goto err_idxd_alloc;
		}

		dev_dbg(dev, "Mapping BARs\n");
		idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
		if (!idxd->reg_base) {
			rc = -ENOMEM;
			goto err_iomap;
		}

		dev_dbg(dev, "Set DMA masks\n");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err;
	}

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	if (alloc_idxd) {
		idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
		rc = idxd_probe(idxd);
		if (rc) {
			dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
			goto err;
		}

		if (data->load_device_defaults) {
			rc = data->load_device_defaults(idxd);
			if (rc)
				dev_warn(dev, "IDXD loading device defaults failed\n");
		}

		rc = idxd_register_devices(idxd);
		if (rc) {
			dev_err(dev, "IDXD sysfs setup failed\n");
			goto err_dev_register;
		}

		rc = idxd_device_init_debugfs(idxd);
		if (rc)
			dev_warn(dev, "IDXD debugfs failed to setup\n");
	}

	if (!alloc_idxd) {
		/* Release interrupts in the IDXD device. */
		idxd_cleanup_interrupts(idxd);

		/* Re-enable interrupts in the IDXD device. */
		rc = idxd_setup_interrupts(idxd);
		if (rc)
			dev_warn(dev, "IDXD interrupts failed to setup\n");
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	if (data)
		idxd->user_submission_safe = data->user_submission_safe;

	return 0;

err_dev_register:
	idxd_cleanup(idxd);
err:
	pci_iounmap(pdev, idxd->reg_base);
err_iomap:
	idxd_free(idxd);
err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return idxd_pci_probe_alloc(NULL, pdev, id);
}
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	idxd_device_remove_debugfs(idxd);
	idxd_cleanup(idxd);
	pci_iounmap(pdev, idxd->reg_base);
	put_device(idxd_confdev(idxd));
	idxd_free(idxd);
	pci_disable_device(pdev);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
	.err_handler	= &idxd_error_handler,
};
static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = idxd_init_debugfs();
	if (err)
		goto err_debugfs;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_remove_debugfs();
err_debugfs:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_remove_debugfs();
}
module_exit(idxd_exit_module);