// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

const struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};
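
/*
 * Illustrative usage (not part of the driver): the group_id attribute above
 * is what tools such as accel-config manipulate when assigning an engine to
 * a group. Assuming a device enumerated as dsa0 with engine0.0 on the dsa
 * bus, the exchange looks roughly like:
 *
 *   cat  /sys/bus/dsa/devices/engine0.0/group_id      # -1 while unassigned
 *   echo  0 > /sys/bus/dsa/devices/engine0.0/group_id
 *   echo -1 > /sys/bus/dsa/devices/engine0.0/group_id # detach again
 *
 * The store handler rejects ids outside [-1, max_groups - 1] and refuses the
 * write unless the device reports IDXD_FLAG_CONFIGURABLE.
 */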

/* Group attributes */

static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}
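
/*
 * Example of the accounting above (illustrative numbers only): with
 * max_rdbufs = 96 and two groups reserving 8 and 16 read buffers,
 * nr_rdbufs ends up as 96 - (8 + 16) = 72 buffers left unreserved.
 */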
114
115
static ssize_t group_read_buffers_reserved_show(struct device *dev,
116
struct device_attribute *attr,
117
char *buf)
118
{
119
struct idxd_group *group = confdev_to_group(dev);
120
121
return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
122
}
123
124
static ssize_t group_tokens_reserved_show(struct device *dev,
125
struct device_attribute *attr,
126
char *buf)
127
{
128
dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
129
return group_read_buffers_reserved_show(dev, attr, buf);
130
}
131
132
static ssize_t group_read_buffers_reserved_store(struct device *dev,
133
struct device_attribute *attr,
134
const char *buf, size_t count)
135
{
136
struct idxd_group *group = confdev_to_group(dev);
137
struct idxd_device *idxd = group->idxd;
138
unsigned long val;
139
int rc;
140
141
rc = kstrtoul(buf, 10, &val);
142
if (rc < 0)
143
return -EINVAL;
144
145
if (idxd->data->type == IDXD_TYPE_IAX)
146
return -EOPNOTSUPP;
147
148
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
149
return -EPERM;
150
151
if (idxd->state == IDXD_DEV_ENABLED)
152
return -EPERM;
153
154
if (val > idxd->max_rdbufs)
155
return -EINVAL;
156
157
if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
158
return -EINVAL;
159
160
group->rdbufs_reserved = val;
161
idxd_set_free_rdbufs(idxd);
162
return count;
163
}
164
165
static ssize_t group_tokens_reserved_store(struct device *dev,
166
struct device_attribute *attr,
167
const char *buf, size_t count)
168
{
169
dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
170
return group_read_buffers_reserved_store(dev, attr, buf, count);
171
}
172
173
static struct device_attribute dev_attr_group_tokens_reserved =
174
__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
175
group_tokens_reserved_store);
176
177
static struct device_attribute dev_attr_group_read_buffers_reserved =
178
__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
179
group_read_buffers_reserved_store);
180
181
static ssize_t group_read_buffers_allowed_show(struct device *dev,
182
struct device_attribute *attr,
183
char *buf)
184
{
185
struct idxd_group *group = confdev_to_group(dev);
186
187
return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
188
}
189
190
static ssize_t group_tokens_allowed_show(struct device *dev,
191
struct device_attribute *attr,
192
char *buf)
193
{
194
dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
195
return group_read_buffers_allowed_show(dev, attr, buf);
196
}
197
198
static ssize_t group_read_buffers_allowed_store(struct device *dev,
199
struct device_attribute *attr,
200
const char *buf, size_t count)
201
{
202
struct idxd_group *group = confdev_to_group(dev);
203
struct idxd_device *idxd = group->idxd;
204
unsigned long val;
205
int rc;
206
207
rc = kstrtoul(buf, 10, &val);
208
if (rc < 0)
209
return -EINVAL;
210
211
if (idxd->data->type == IDXD_TYPE_IAX)
212
return -EOPNOTSUPP;
213
214
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
215
return -EPERM;
216
217
if (idxd->state == IDXD_DEV_ENABLED)
218
return -EPERM;
219
220
if (val < 4 * group->num_engines ||
221
val > group->rdbufs_reserved + idxd->nr_rdbufs)
222
return -EINVAL;
223
224
group->rdbufs_allowed = val;
225
return count;
226
}
227
228
static ssize_t group_tokens_allowed_store(struct device *dev,
229
struct device_attribute *attr,
230
const char *buf, size_t count)
231
{
232
dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
233
return group_read_buffers_allowed_store(dev, attr, buf, count);
234
}
235
236
static struct device_attribute dev_attr_group_tokens_allowed =
237
__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
238
group_tokens_allowed_store);
239
240
static struct device_attribute dev_attr_group_read_buffers_allowed =
241
__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
242
group_read_buffers_allowed_store);
243
244
static ssize_t group_use_read_buffer_limit_show(struct device *dev,
245
struct device_attribute *attr,
246
char *buf)
247
{
248
struct idxd_group *group = confdev_to_group(dev);
249
250
return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
251
}
252
253
static ssize_t group_use_token_limit_show(struct device *dev,
254
struct device_attribute *attr,
255
char *buf)
256
{
257
dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
258
return group_use_read_buffer_limit_show(dev, attr, buf);
259
}
260
261
static ssize_t group_use_read_buffer_limit_store(struct device *dev,
262
struct device_attribute *attr,
263
const char *buf, size_t count)
264
{
265
struct idxd_group *group = confdev_to_group(dev);
266
struct idxd_device *idxd = group->idxd;
267
unsigned long val;
268
int rc;
269
270
rc = kstrtoul(buf, 10, &val);
271
if (rc < 0)
272
return -EINVAL;
273
274
if (idxd->data->type == IDXD_TYPE_IAX)
275
return -EOPNOTSUPP;
276
277
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
278
return -EPERM;
279
280
if (idxd->state == IDXD_DEV_ENABLED)
281
return -EPERM;
282
283
if (idxd->rdbuf_limit == 0)
284
return -EPERM;
285
286
group->use_rdbuf_limit = !!val;
287
return count;
288
}
289
290
static ssize_t group_use_token_limit_store(struct device *dev,
291
struct device_attribute *attr,
292
const char *buf, size_t count)
293
{
294
dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
295
return group_use_read_buffer_limit_store(dev, attr, buf, count);
296
}
297
298
static struct device_attribute dev_attr_group_use_token_limit =
299
__ATTR(use_token_limit, 0644, group_use_token_limit_show,
300
group_use_token_limit_store);
301
302
static struct device_attribute dev_attr_group_use_read_buffer_limit =
303
__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
304
group_use_read_buffer_limit_store);
305
306
static ssize_t group_engines_show(struct device *dev,
307
struct device_attribute *attr, char *buf)
308
{
309
struct idxd_group *group = confdev_to_group(dev);
310
int i, rc = 0;
311
struct idxd_device *idxd = group->idxd;
312
313
for (i = 0; i < idxd->max_engines; i++) {
314
struct idxd_engine *engine = idxd->engines[i];
315
316
if (!engine->group)
317
continue;
318
319
if (engine->group->id == group->id)
320
rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
321
}
322
323
if (!rc)
324
return 0;
325
rc--;
326
rc += sysfs_emit_at(buf, rc, "\n");
327
328
return rc;
329
}
330
331
static struct device_attribute dev_attr_group_engines =
332
__ATTR(engines, 0444, group_engines_show, NULL);
333
334
static ssize_t group_work_queues_show(struct device *dev,
335
struct device_attribute *attr, char *buf)
336
{
337
struct idxd_group *group = confdev_to_group(dev);
338
int i, rc = 0;
339
struct idxd_device *idxd = group->idxd;
340
341
for (i = 0; i < idxd->max_wqs; i++) {
342
struct idxd_wq *wq = idxd->wqs[i];
343
344
if (!wq->group)
345
continue;
346
347
if (wq->group->id == group->id)
348
rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
349
}
350
351
if (!rc)
352
return 0;
353
rc--;
354
rc += sysfs_emit_at(buf, rc, "\n");
355
356
return rc;
357
}
358
359
static struct device_attribute dev_attr_group_work_queues =
360
__ATTR(work_queues, 0444, group_work_queues_show, NULL);
361
362
static ssize_t group_traffic_class_a_show(struct device *dev,
363
struct device_attribute *attr,
364
char *buf)
365
{
366
struct idxd_group *group = confdev_to_group(dev);
367
368
return sysfs_emit(buf, "%d\n", group->tc_a);
369
}
370
371
static ssize_t group_traffic_class_a_store(struct device *dev,
372
struct device_attribute *attr,
373
const char *buf, size_t count)
374
{
375
struct idxd_group *group = confdev_to_group(dev);
376
struct idxd_device *idxd = group->idxd;
377
long val;
378
int rc;
379
380
rc = kstrtol(buf, 10, &val);
381
if (rc < 0)
382
return -EINVAL;
383
384
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
385
return -EPERM;
386
387
if (idxd->state == IDXD_DEV_ENABLED)
388
return -EPERM;
389
390
if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
391
return -EPERM;
392
393
if (val < 0 || val > 7)
394
return -EINVAL;
395
396
group->tc_a = val;
397
return count;
398
}
399
400
static struct device_attribute dev_attr_group_traffic_class_a =
401
__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
402
group_traffic_class_a_store);
403
404
static ssize_t group_traffic_class_b_show(struct device *dev,
405
struct device_attribute *attr,
406
char *buf)
407
{
408
struct idxd_group *group = confdev_to_group(dev);
409
410
return sysfs_emit(buf, "%d\n", group->tc_b);
411
}
412
413
static ssize_t group_traffic_class_b_store(struct device *dev,
414
struct device_attribute *attr,
415
const char *buf, size_t count)
416
{
417
struct idxd_group *group = confdev_to_group(dev);
418
struct idxd_device *idxd = group->idxd;
419
long val;
420
int rc;
421
422
rc = kstrtol(buf, 10, &val);
423
if (rc < 0)
424
return -EINVAL;
425
426
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
427
return -EPERM;
428
429
if (idxd->state == IDXD_DEV_ENABLED)
430
return -EPERM;
431
432
if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
433
return -EPERM;
434
435
if (val < 0 || val > 7)
436
return -EINVAL;
437
438
group->tc_b = val;
439
return count;
440
}
441
442
static struct device_attribute dev_attr_group_traffic_class_b =
443
__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
444
group_traffic_class_b_store);
445
446
static ssize_t group_desc_progress_limit_show(struct device *dev,
447
struct device_attribute *attr,
448
char *buf)
449
{
450
struct idxd_group *group = confdev_to_group(dev);
451
452
return sysfs_emit(buf, "%d\n", group->desc_progress_limit);
453
}
454
455
static ssize_t group_desc_progress_limit_store(struct device *dev,
456
struct device_attribute *attr,
457
const char *buf, size_t count)
458
{
459
struct idxd_group *group = confdev_to_group(dev);
460
int val, rc;
461
462
rc = kstrtoint(buf, 10, &val);
463
if (rc < 0)
464
return -EINVAL;
465
466
if (val & ~GENMASK(1, 0))
467
return -EINVAL;
468
469
group->desc_progress_limit = val;
470
return count;
471
}
472
473
static struct device_attribute dev_attr_group_desc_progress_limit =
474
__ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show,
475
group_desc_progress_limit_store);
476
477
static ssize_t group_batch_progress_limit_show(struct device *dev,
478
struct device_attribute *attr,
479
char *buf)
480
{
481
struct idxd_group *group = confdev_to_group(dev);
482
483
return sysfs_emit(buf, "%d\n", group->batch_progress_limit);
484
}
485
486
static ssize_t group_batch_progress_limit_store(struct device *dev,
487
struct device_attribute *attr,
488
const char *buf, size_t count)
489
{
490
struct idxd_group *group = confdev_to_group(dev);
491
int val, rc;
492
493
rc = kstrtoint(buf, 10, &val);
494
if (rc < 0)
495
return -EINVAL;
496
497
if (val & ~GENMASK(1, 0))
498
return -EINVAL;
499
500
group->batch_progress_limit = val;
501
return count;
502
}
503
504
static struct device_attribute dev_attr_group_batch_progress_limit =
505
__ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show,
506
group_batch_progress_limit_store);
507
static struct attribute *idxd_group_attributes[] = {
508
&dev_attr_group_work_queues.attr,
509
&dev_attr_group_engines.attr,
510
&dev_attr_group_use_token_limit.attr,
511
&dev_attr_group_use_read_buffer_limit.attr,
512
&dev_attr_group_tokens_allowed.attr,
513
&dev_attr_group_read_buffers_allowed.attr,
514
&dev_attr_group_tokens_reserved.attr,
515
&dev_attr_group_read_buffers_reserved.attr,
516
&dev_attr_group_traffic_class_a.attr,
517
&dev_attr_group_traffic_class_b.attr,
518
&dev_attr_group_desc_progress_limit.attr,
519
&dev_attr_group_batch_progress_limit.attr,
520
NULL,
521
};
522
523
static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
524
struct idxd_device *idxd)
525
{
526
return (attr == &dev_attr_group_desc_progress_limit.attr ||
527
attr == &dev_attr_group_batch_progress_limit.attr) &&
528
!idxd->hw.group_cap.progress_limit;
529
}
530
531
static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr,
532
struct idxd_device *idxd)
533
{
534
/*
535
* Intel IAA does not support Read Buffer allocation control,
536
* make these attributes invisible.
537
*/
538
return (attr == &dev_attr_group_use_token_limit.attr ||
539
attr == &dev_attr_group_use_read_buffer_limit.attr ||
540
attr == &dev_attr_group_tokens_allowed.attr ||
541
attr == &dev_attr_group_read_buffers_allowed.attr ||
542
attr == &dev_attr_group_tokens_reserved.attr ||
543
attr == &dev_attr_group_read_buffers_reserved.attr) &&
544
idxd->data->type == IDXD_TYPE_IAX;
545
}
546
547
static umode_t idxd_group_attr_visible(struct kobject *kobj,
548
struct attribute *attr, int n)
549
{
550
struct device *dev = container_of(kobj, struct device, kobj);
551
struct idxd_group *group = confdev_to_group(dev);
552
struct idxd_device *idxd = group->idxd;
553
554
if (idxd_group_attr_progress_limit_invisible(attr, idxd))
555
return 0;
556
557
if (idxd_group_attr_read_buffers_invisible(attr, idxd))
558
return 0;
559
560
return attr->mode;
561
}
562
563
static const struct attribute_group idxd_group_attribute_group = {
564
.attrs = idxd_group_attributes,
565
.is_visible = idxd_group_attr_visible,
566
};
567
568
static const struct attribute_group *idxd_group_attribute_groups[] = {
569
&idxd_group_attribute_group,
570
NULL,
571
};
572
573
static void idxd_conf_group_release(struct device *dev)
574
{
575
struct idxd_group *group = confdev_to_group(dev);
576
577
kfree(group);
578
}
579
580
const struct device_type idxd_group_device_type = {
581
.name = "group",
582
.release = idxd_conf_group_release,
583
.groups = idxd_group_attribute_groups,
584
};
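
/*
 * Illustrative group configuration through the attributes above (paths and
 * values are examples only, assuming a DSA device enumerated as dsa0):
 *
 *   echo 8 > /sys/bus/dsa/devices/group0.0/read_buffers_reserved
 *   echo 1 > /sys/bus/dsa/devices/group0.0/use_read_buffer_limit
 *   echo 3 > /sys/bus/dsa/devices/group0.0/traffic_class_a
 *
 * All of these writes are rejected with -EPERM once the device has been
 * enabled; groups must be reconfigured while the device is disabled and
 * configurable.
 */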
585
586
/* IDXD work queue attribs */
587
static ssize_t wq_clients_show(struct device *dev,
588
struct device_attribute *attr, char *buf)
589
{
590
struct idxd_wq *wq = confdev_to_wq(dev);
591
592
return sysfs_emit(buf, "%d\n", wq->client_count);
593
}
594
595
static struct device_attribute dev_attr_wq_clients =
596
__ATTR(clients, 0444, wq_clients_show, NULL);
597
598
static ssize_t wq_state_show(struct device *dev,
599
struct device_attribute *attr, char *buf)
600
{
601
struct idxd_wq *wq = confdev_to_wq(dev);
602
603
switch (wq->state) {
604
case IDXD_WQ_DISABLED:
605
return sysfs_emit(buf, "disabled\n");
606
case IDXD_WQ_ENABLED:
607
return sysfs_emit(buf, "enabled\n");
608
}
609
610
return sysfs_emit(buf, "unknown\n");
611
}
612
613
static struct device_attribute dev_attr_wq_state =
614
__ATTR(state, 0444, wq_state_show, NULL);
615
616
static ssize_t wq_group_id_show(struct device *dev,
617
struct device_attribute *attr, char *buf)
618
{
619
struct idxd_wq *wq = confdev_to_wq(dev);
620
621
if (wq->group)
622
return sysfs_emit(buf, "%u\n", wq->group->id);
623
else
624
return sysfs_emit(buf, "-1\n");
625
}
626
627
static ssize_t wq_group_id_store(struct device *dev,
628
struct device_attribute *attr,
629
const char *buf, size_t count)
630
{
631
struct idxd_wq *wq = confdev_to_wq(dev);
632
struct idxd_device *idxd = wq->idxd;
633
long id;
634
int rc;
635
struct idxd_group *prevg, *group;
636
637
rc = kstrtol(buf, 10, &id);
638
if (rc < 0)
639
return -EINVAL;
640
641
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
642
return -EPERM;
643
644
if (wq->state != IDXD_WQ_DISABLED)
645
return -EPERM;
646
647
if (id > idxd->max_groups - 1 || id < -1)
648
return -EINVAL;
649
650
if (id == -1) {
651
if (wq->group) {
652
wq->group->num_wqs--;
653
wq->group = NULL;
654
}
655
return count;
656
}
657
658
group = idxd->groups[id];
659
prevg = wq->group;
660
661
if (prevg)
662
prevg->num_wqs--;
663
wq->group = group;
664
group->num_wqs++;
665
return count;
666
}
667
668
static struct device_attribute dev_attr_wq_group_id =
669
__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
670
671
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
672
char *buf)
673
{
674
struct idxd_wq *wq = confdev_to_wq(dev);
675
676
return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
677
}
678
679
static ssize_t wq_mode_store(struct device *dev,
680
struct device_attribute *attr, const char *buf,
681
size_t count)
682
{
683
struct idxd_wq *wq = confdev_to_wq(dev);
684
struct idxd_device *idxd = wq->idxd;
685
686
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
687
return -EPERM;
688
689
if (wq->state != IDXD_WQ_DISABLED)
690
return -EPERM;
691
692
if (sysfs_streq(buf, "dedicated")) {
693
set_bit(WQ_FLAG_DEDICATED, &wq->flags);
694
wq->threshold = 0;
695
} else if (sysfs_streq(buf, "shared")) {
696
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
697
} else {
698
return -EINVAL;
699
}
700
701
return count;
702
}
703
704
static struct device_attribute dev_attr_wq_mode =
705
__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
706
707
static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
708
char *buf)
709
{
710
struct idxd_wq *wq = confdev_to_wq(dev);
711
712
return sysfs_emit(buf, "%u\n", wq->size);
713
}
714
715
static int total_claimed_wq_size(struct idxd_device *idxd)
716
{
717
int i;
718
int wq_size = 0;
719
720
for (i = 0; i < idxd->max_wqs; i++) {
721
struct idxd_wq *wq = idxd->wqs[i];
722
723
wq_size += wq->size;
724
}
725
726
return wq_size;
727
}
728
729
static ssize_t wq_size_store(struct device *dev,
730
struct device_attribute *attr, const char *buf,
731
size_t count)
732
{
733
struct idxd_wq *wq = confdev_to_wq(dev);
734
unsigned long size;
735
struct idxd_device *idxd = wq->idxd;
736
int rc;
737
738
rc = kstrtoul(buf, 10, &size);
739
if (rc < 0)
740
return -EINVAL;
741
742
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
743
return -EPERM;
744
745
if (idxd->state == IDXD_DEV_ENABLED)
746
return -EPERM;
747
748
if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
749
return -EINVAL;
750
751
wq->size = size;
752
return count;
753
}
754
755
static struct device_attribute dev_attr_wq_size =
756
__ATTR(size, 0644, wq_size_show, wq_size_store);
757
758
static ssize_t wq_priority_show(struct device *dev,
759
struct device_attribute *attr, char *buf)
760
{
761
struct idxd_wq *wq = confdev_to_wq(dev);
762
763
return sysfs_emit(buf, "%u\n", wq->priority);
764
}
765
766
static ssize_t wq_priority_store(struct device *dev,
767
struct device_attribute *attr,
768
const char *buf, size_t count)
769
{
770
struct idxd_wq *wq = confdev_to_wq(dev);
771
unsigned long prio;
772
struct idxd_device *idxd = wq->idxd;
773
int rc;
774
775
rc = kstrtoul(buf, 10, &prio);
776
if (rc < 0)
777
return -EINVAL;
778
779
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
780
return -EPERM;
781
782
if (wq->state != IDXD_WQ_DISABLED)
783
return -EPERM;
784
785
if (prio > IDXD_MAX_PRIORITY)
786
return -EINVAL;
787
788
wq->priority = prio;
789
return count;
790
}
791
792
static struct device_attribute dev_attr_wq_priority =
793
__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
794
795
static ssize_t wq_block_on_fault_show(struct device *dev,
796
struct device_attribute *attr, char *buf)
797
{
798
struct idxd_wq *wq = confdev_to_wq(dev);
799
800
return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
801
}
802
803
static ssize_t wq_block_on_fault_store(struct device *dev,
804
struct device_attribute *attr,
805
const char *buf, size_t count)
806
{
807
struct idxd_wq *wq = confdev_to_wq(dev);
808
struct idxd_device *idxd = wq->idxd;
809
bool bof;
810
int rc;
811
812
if (!idxd->hw.gen_cap.block_on_fault)
813
return -EOPNOTSUPP;
814
815
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
816
return -EPERM;
817
818
if (wq->state != IDXD_WQ_DISABLED)
819
return -ENXIO;
820
821
rc = kstrtobool(buf, &bof);
822
if (rc < 0)
823
return rc;
824
825
if (bof) {
826
if (test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
827
return -EOPNOTSUPP;
828
829
set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
830
} else {
831
clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
832
}
833
834
return count;
835
}
836
837
static struct device_attribute dev_attr_wq_block_on_fault =
838
__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
839
wq_block_on_fault_store);
840
841
static ssize_t wq_threshold_show(struct device *dev,
842
struct device_attribute *attr, char *buf)
843
{
844
struct idxd_wq *wq = confdev_to_wq(dev);
845
846
return sysfs_emit(buf, "%u\n", wq->threshold);
847
}
848
849
static ssize_t wq_threshold_store(struct device *dev,
850
struct device_attribute *attr,
851
const char *buf, size_t count)
852
{
853
struct idxd_wq *wq = confdev_to_wq(dev);
854
struct idxd_device *idxd = wq->idxd;
855
unsigned int val;
856
int rc;
857
858
rc = kstrtouint(buf, 0, &val);
859
if (rc < 0)
860
return -EINVAL;
861
862
if (val > wq->size || val <= 0)
863
return -EINVAL;
864
865
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
866
return -EPERM;
867
868
if (wq->state != IDXD_WQ_DISABLED)
869
return -ENXIO;
870
871
if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
872
return -EINVAL;
873
874
wq->threshold = val;
875
876
return count;
877
}
878
879
static struct device_attribute dev_attr_wq_threshold =
880
__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
881
882
static ssize_t wq_type_show(struct device *dev,
883
struct device_attribute *attr, char *buf)
884
{
885
struct idxd_wq *wq = confdev_to_wq(dev);
886
887
switch (wq->type) {
888
case IDXD_WQT_KERNEL:
889
return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
890
case IDXD_WQT_USER:
891
return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
892
case IDXD_WQT_NONE:
893
default:
894
return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
895
}
896
897
return -EINVAL;
898
}
899
900
static ssize_t wq_type_store(struct device *dev,
901
struct device_attribute *attr, const char *buf,
902
size_t count)
903
{
904
struct idxd_wq *wq = confdev_to_wq(dev);
905
enum idxd_wq_type old_type;
906
907
if (wq->state != IDXD_WQ_DISABLED)
908
return -EPERM;
909
910
old_type = wq->type;
911
if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
912
wq->type = IDXD_WQT_NONE;
913
else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
914
wq->type = IDXD_WQT_KERNEL;
915
else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
916
wq->type = IDXD_WQT_USER;
917
else
918
return -EINVAL;
919
920
/* If we are changing queue type, clear the name */
921
if (wq->type != old_type)
922
memset(wq->name, 0, WQ_NAME_SIZE + 1);
923
924
return count;
925
}
926
927
static struct device_attribute dev_attr_wq_type =
928
__ATTR(type, 0644, wq_type_show, wq_type_store);
929
930
static ssize_t wq_name_show(struct device *dev,
931
struct device_attribute *attr, char *buf)
932
{
933
struct idxd_wq *wq = confdev_to_wq(dev);
934
935
return sysfs_emit(buf, "%s\n", wq->name);
936
}
937
938
static ssize_t wq_name_store(struct device *dev,
939
struct device_attribute *attr, const char *buf,
940
size_t count)
941
{
942
struct idxd_wq *wq = confdev_to_wq(dev);
943
char *input, *pos;
944
945
if (wq->state != IDXD_WQ_DISABLED)
946
return -EPERM;
947
948
if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
949
return -EINVAL;
950
951
input = kstrndup(buf, count, GFP_KERNEL);
952
if (!input)
953
return -ENOMEM;
954
955
pos = strim(input);
956
memset(wq->name, 0, WQ_NAME_SIZE + 1);
957
sprintf(wq->name, "%s", pos);
958
kfree(input);
959
return count;
960
}
961
962
static struct device_attribute dev_attr_wq_name =
963
__ATTR(name, 0644, wq_name_show, wq_name_store);
964
965
static ssize_t wq_cdev_minor_show(struct device *dev,
966
struct device_attribute *attr, char *buf)
967
{
968
struct idxd_wq *wq = confdev_to_wq(dev);
969
int minor = -1;
970
971
mutex_lock(&wq->wq_lock);
972
if (wq->idxd_cdev)
973
minor = wq->idxd_cdev->minor;
974
mutex_unlock(&wq->wq_lock);
975
976
if (minor == -1)
977
return -ENXIO;
978
return sysfs_emit(buf, "%d\n", minor);
979
}
980
981
static struct device_attribute dev_attr_wq_cdev_minor =
982
__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
983
984
static int __get_sysfs_u64(const char *buf, u64 *val)
985
{
986
int rc;
987
988
rc = kstrtou64(buf, 0, val);
989
if (rc < 0)
990
return -EINVAL;
991
992
if (*val == 0)
993
return -EINVAL;
994
995
*val = roundup_pow_of_two(*val);
996
return 0;
997
}
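
/*
 * Note on the helper above: the parsed value is rounded up to the next power
 * of two, so (for example) writing 2000 yields 2048, subject to the per-device
 * caps checked by the callers. Zero is rejected outright.
 */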
998
999
static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
1000
char *buf)
1001
{
1002
struct idxd_wq *wq = confdev_to_wq(dev);
1003
1004
return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
1005
}
1006
1007
static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
1008
const char *buf, size_t count)
1009
{
1010
struct idxd_wq *wq = confdev_to_wq(dev);
1011
struct idxd_device *idxd = wq->idxd;
1012
u64 xfer_size;
1013
int rc;
1014
1015
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1016
return -EPERM;
1017
1018
if (wq->state != IDXD_WQ_DISABLED)
1019
return -EPERM;
1020
1021
rc = __get_sysfs_u64(buf, &xfer_size);
1022
if (rc < 0)
1023
return rc;
1024
1025
if (xfer_size > idxd->max_xfer_bytes)
1026
return -EINVAL;
1027
1028
wq->max_xfer_bytes = xfer_size;
1029
1030
return count;
1031
}
1032
1033
static struct device_attribute dev_attr_wq_max_transfer_size =
1034
__ATTR(max_transfer_size, 0644,
1035
wq_max_transfer_size_show, wq_max_transfer_size_store);
1036
1037
static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
1038
{
1039
struct idxd_wq *wq = confdev_to_wq(dev);
1040
1041
return sysfs_emit(buf, "%u\n", wq->max_batch_size);
1042
}
1043
1044
static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
1045
const char *buf, size_t count)
1046
{
1047
struct idxd_wq *wq = confdev_to_wq(dev);
1048
struct idxd_device *idxd = wq->idxd;
1049
u64 batch_size;
1050
int rc;
1051
1052
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1053
return -EPERM;
1054
1055
if (wq->state != IDXD_WQ_DISABLED)
1056
return -EPERM;
1057
1058
rc = __get_sysfs_u64(buf, &batch_size);
1059
if (rc < 0)
1060
return rc;
1061
1062
if (batch_size > idxd->max_batch_size)
1063
return -EINVAL;
1064
1065
idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);
1066
1067
return count;
1068
}
1069
1070
static struct device_attribute dev_attr_wq_max_batch_size =
1071
__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
1072
1073
static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
1074
{
1075
struct idxd_wq *wq = confdev_to_wq(dev);
1076
1077
return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags));
1078
}
1079
1080
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
1081
const char *buf, size_t count)
1082
{
1083
struct idxd_wq *wq = confdev_to_wq(dev);
1084
struct idxd_device *idxd = wq->idxd;
1085
bool ats_dis;
1086
int rc;
1087
1088
if (wq->state != IDXD_WQ_DISABLED)
1089
return -EPERM;
1090
1091
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1092
return -EPERM;
1093
1094
rc = kstrtobool(buf, &ats_dis);
1095
if (rc < 0)
1096
return rc;
1097
1098
if (ats_dis)
1099
set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
1100
else
1101
clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
1102
1103
return count;
1104
}
1105
1106
static struct device_attribute dev_attr_wq_ats_disable =
1107
__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
1108
1109
static ssize_t wq_prs_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
1110
{
1111
struct idxd_wq *wq = confdev_to_wq(dev);
1112
1113
return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags));
1114
}
1115
1116
static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute *attr,
1117
const char *buf, size_t count)
1118
{
1119
struct idxd_wq *wq = confdev_to_wq(dev);
1120
struct idxd_device *idxd = wq->idxd;
1121
bool prs_dis;
1122
int rc;
1123
1124
if (wq->state != IDXD_WQ_DISABLED)
1125
return -EPERM;
1126
1127
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1128
return -EPERM;
1129
1130
rc = kstrtobool(buf, &prs_dis);
1131
if (rc < 0)
1132
return rc;
1133
1134
if (prs_dis) {
1135
set_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
1136
/* when PRS is disabled, BOF needs to be off as well */
1137
clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
1138
} else {
1139
clear_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
1140
}
1141
return count;
1142
}
1143
1144
static struct device_attribute dev_attr_wq_prs_disable =
1145
__ATTR(prs_disable, 0644, wq_prs_disable_show, wq_prs_disable_store);
1146
1147
static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
1148
{
1149
struct idxd_wq *wq = confdev_to_wq(dev);
1150
struct idxd_device *idxd = wq->idxd;
1151
u32 occup, offset;
1152
1153
if (!idxd->hw.wq_cap.occupancy)
1154
return -EOPNOTSUPP;
1155
1156
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
1157
occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;
1158
1159
return sysfs_emit(buf, "%u\n", occup);
1160
}
1161
1162
static struct device_attribute dev_attr_wq_occupancy =
1163
__ATTR(occupancy, 0444, wq_occupancy_show, NULL);
1164
1165
static ssize_t wq_enqcmds_retries_show(struct device *dev,
1166
struct device_attribute *attr, char *buf)
1167
{
1168
struct idxd_wq *wq = confdev_to_wq(dev);
1169
1170
if (wq_dedicated(wq))
1171
return -EOPNOTSUPP;
1172
1173
return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
1174
}
1175
1176
static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
1177
const char *buf, size_t count)
1178
{
1179
struct idxd_wq *wq = confdev_to_wq(dev);
1180
int rc;
1181
unsigned int retries;
1182
1183
if (wq_dedicated(wq))
1184
return -EOPNOTSUPP;
1185
1186
rc = kstrtouint(buf, 10, &retries);
1187
if (rc < 0)
1188
return rc;
1189
1190
if (retries > IDXD_ENQCMDS_MAX_RETRIES)
1191
retries = IDXD_ENQCMDS_MAX_RETRIES;
1192
1193
wq->enqcmds_retries = retries;
1194
return count;
1195
}
1196
1197
static struct device_attribute dev_attr_wq_enqcmds_retries =
1198
__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
1199
1200
static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
1201
{
1202
ssize_t pos;
1203
int i;
1204
1205
pos = 0;
1206
for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
1207
unsigned long val = opcap_bmap[i];
1208
1209
/* On systems where direct user submissions are not safe, we need to clear out
1210
* the BATCH capability from the capability mask in sysfs since we cannot support
1211
* that command on such systems. Narrow the restriction of operations with the
1212
* BATCH opcode to only DSA version 1 devices.
1213
*/
1214
if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe &&
1215
confdev_to_idxd(dev)->hw.version == DEVICE_VERSION_1)
1216
clear_bit(DSA_OPCODE_BATCH % 64, &val);
1217
1218
pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
1219
pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
1220
}
1221
1222
return pos;
1223
}
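
/*
 * Output sketch: the loop above walks the 256-bit capability bitmap from the
 * highest 64-bit word down to word 0, printing each word with "%*pb" and
 * separating the words with commas, so userspace sees the most significant
 * word first on a single line.
 */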
1224
1225
static ssize_t wq_op_config_show(struct device *dev,
1226
struct device_attribute *attr, char *buf)
1227
{
1228
struct idxd_wq *wq = confdev_to_wq(dev);
1229
1230
return op_cap_show_common(dev, buf, wq->opcap_bmap);
1231
}
1232
1233
static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
1234
{
1235
int bit;
1236
1237
/*
1238
* The OPCAP is defined as 256 bits that represents each operation the device
1239
* supports per bit. Iterate through all the bits and check if the input mask
1240
* is set for bits that are not set in the OPCAP for the device. If no OPCAP
1241
* bit is set and input mask has the bit set, then return error.
1242
*/
1243
for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
1244
if (!test_bit(bit, idxd->opcap_bmap))
1245
return -EINVAL;
1246
}
1247
1248
return 0;
1249
}
1250
1251
static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr,
1252
const char *buf, size_t count)
1253
{
1254
struct idxd_wq *wq = confdev_to_wq(dev);
1255
struct idxd_device *idxd = wq->idxd;
1256
unsigned long *opmask;
1257
int rc;
1258
1259
if (wq->state != IDXD_WQ_DISABLED)
1260
return -EPERM;
1261
1262
opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
1263
if (!opmask)
1264
return -ENOMEM;
1265
1266
rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS);
1267
if (rc < 0)
1268
goto err;
1269
1270
rc = idxd_verify_supported_opcap(idxd, opmask);
1271
if (rc < 0)
1272
goto err;
1273
1274
bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS);
1275
1276
bitmap_free(opmask);
1277
return count;
1278
1279
err:
1280
bitmap_free(opmask);
1281
return rc;
1282
}
1283
1284
static struct device_attribute dev_attr_wq_op_config =
1285
__ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);
1286
1287
static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1288
{
1289
struct idxd_wq *wq = confdev_to_wq(dev);
1290
1291
return sysfs_emit(buf, "%s\n", wq->driver_name);
1292
}
1293
1294
static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr,
1295
const char *buf, size_t count)
1296
{
1297
struct idxd_wq *wq = confdev_to_wq(dev);
1298
char *input, *pos;
1299
1300
if (wq->state != IDXD_WQ_DISABLED)
1301
return -EPERM;
1302
1303
if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0)
1304
return -EINVAL;
1305
1306
input = kstrndup(buf, count, GFP_KERNEL);
1307
if (!input)
1308
return -ENOMEM;
1309
1310
pos = strim(input);
1311
memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
1312
sprintf(wq->driver_name, "%s", pos);
1313
kfree(input);
1314
return count;
1315
}
1316
1317
static struct device_attribute dev_attr_wq_driver_name =
1318
__ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store);
1319
1320
static struct attribute *idxd_wq_attributes[] = {
1321
&dev_attr_wq_clients.attr,
1322
&dev_attr_wq_state.attr,
1323
&dev_attr_wq_group_id.attr,
1324
&dev_attr_wq_mode.attr,
1325
&dev_attr_wq_size.attr,
1326
&dev_attr_wq_priority.attr,
1327
&dev_attr_wq_block_on_fault.attr,
1328
&dev_attr_wq_threshold.attr,
1329
&dev_attr_wq_type.attr,
1330
&dev_attr_wq_name.attr,
1331
&dev_attr_wq_cdev_minor.attr,
1332
&dev_attr_wq_max_transfer_size.attr,
1333
&dev_attr_wq_max_batch_size.attr,
1334
&dev_attr_wq_ats_disable.attr,
1335
&dev_attr_wq_prs_disable.attr,
1336
&dev_attr_wq_occupancy.attr,
1337
&dev_attr_wq_enqcmds_retries.attr,
1338
&dev_attr_wq_op_config.attr,
1339
&dev_attr_wq_driver_name.attr,
1340
NULL,
1341
};
1342
1343
/* A WQ attr is invisible if the feature is not supported in WQCAP. */
1344
#define idxd_wq_attr_invisible(name, cap_field, a, idxd) \
1345
((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
1346
1347
static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
1348
struct idxd_device *idxd)
1349
{
1350
/* Intel IAA does not support batch processing, make it invisible */
1351
return attr == &dev_attr_wq_max_batch_size.attr &&
1352
idxd->data->type == IDXD_TYPE_IAX;
1353
}
1354
1355
static umode_t idxd_wq_attr_visible(struct kobject *kobj,
1356
struct attribute *attr, int n)
1357
{
1358
struct device *dev = container_of(kobj, struct device, kobj);
1359
struct idxd_wq *wq = confdev_to_wq(dev);
1360
struct idxd_device *idxd = wq->idxd;
1361
1362
if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
1363
return 0;
1364
1365
if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
1366
return 0;
1367
1368
if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
1369
return 0;
1370
1371
if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
1372
return 0;
1373
1374
return attr->mode;
1375
}
1376
1377
static const struct attribute_group idxd_wq_attribute_group = {
1378
.attrs = idxd_wq_attributes,
1379
.is_visible = idxd_wq_attr_visible,
1380
};
1381
1382
static const struct attribute_group *idxd_wq_attribute_groups[] = {
1383
&idxd_wq_attribute_group,
1384
NULL,
1385
};
1386
1387
static void idxd_conf_wq_release(struct device *dev)
1388
{
1389
struct idxd_wq *wq = confdev_to_wq(dev);
1390
1391
bitmap_free(wq->opcap_bmap);
1392
kfree(wq->wqcfg);
1393
xa_destroy(&wq->upasid_xa);
1394
kfree(wq);
1395
}
1396
1397
const struct device_type idxd_wq_device_type = {
1398
.name = "wq",
1399
.release = idxd_conf_wq_release,
1400
.groups = idxd_wq_attribute_groups,
1401
};
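
/*
 * Illustrative end-to-end workqueue setup using the attributes above (paths,
 * sizes and names are examples only, assuming device dsa0 and wq0.0):
 *
 *   echo 0         > /sys/bus/dsa/devices/wq0.0/group_id
 *   echo 16        > /sys/bus/dsa/devices/wq0.0/size
 *   echo dedicated > /sys/bus/dsa/devices/wq0.0/mode
 *   echo user      > /sys/bus/dsa/devices/wq0.0/type
 *   echo myapp     > /sys/bus/dsa/devices/wq0.0/name
 *   echo wq0.0     > /sys/bus/dsa/drivers/user/bind
 *
 * Each store above checks that the wq is still disabled (and, where relevant,
 * that the device is configurable), so configuration has to finish before the
 * wq is bound and enabled.
 */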
1402
1403
/* IDXD device attribs */
1404
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
1405
char *buf)
1406
{
1407
struct idxd_device *idxd = confdev_to_idxd(dev);
1408
1409
return sysfs_emit(buf, "%#x\n", idxd->hw.version);
1410
}
1411
static DEVICE_ATTR_RO(version);
1412
1413
static ssize_t max_work_queues_size_show(struct device *dev,
1414
struct device_attribute *attr,
1415
char *buf)
1416
{
1417
struct idxd_device *idxd = confdev_to_idxd(dev);
1418
1419
return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
1420
}
1421
static DEVICE_ATTR_RO(max_work_queues_size);
1422
1423
static ssize_t max_groups_show(struct device *dev,
1424
struct device_attribute *attr, char *buf)
1425
{
1426
struct idxd_device *idxd = confdev_to_idxd(dev);
1427
1428
return sysfs_emit(buf, "%u\n", idxd->max_groups);
1429
}
1430
static DEVICE_ATTR_RO(max_groups);
1431
1432
static ssize_t max_work_queues_show(struct device *dev,
1433
struct device_attribute *attr, char *buf)
1434
{
1435
struct idxd_device *idxd = confdev_to_idxd(dev);
1436
1437
return sysfs_emit(buf, "%u\n", idxd->max_wqs);
1438
}
1439
static DEVICE_ATTR_RO(max_work_queues);
1440
1441
static ssize_t max_engines_show(struct device *dev,
1442
struct device_attribute *attr, char *buf)
1443
{
1444
struct idxd_device *idxd = confdev_to_idxd(dev);
1445
1446
return sysfs_emit(buf, "%u\n", idxd->max_engines);
1447
}
1448
static DEVICE_ATTR_RO(max_engines);
1449
1450
static ssize_t numa_node_show(struct device *dev,
1451
struct device_attribute *attr, char *buf)
1452
{
1453
struct idxd_device *idxd = confdev_to_idxd(dev);
1454
1455
return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
1456
}
1457
static DEVICE_ATTR_RO(numa_node);
1458
1459
static ssize_t max_batch_size_show(struct device *dev,
1460
struct device_attribute *attr, char *buf)
1461
{
1462
struct idxd_device *idxd = confdev_to_idxd(dev);
1463
1464
return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
1465
}
1466
static DEVICE_ATTR_RO(max_batch_size);
1467
1468
static ssize_t max_transfer_size_show(struct device *dev,
1469
struct device_attribute *attr,
1470
char *buf)
1471
{
1472
struct idxd_device *idxd = confdev_to_idxd(dev);
1473
1474
return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
1475
}
1476
static DEVICE_ATTR_RO(max_transfer_size);
1477
1478
static ssize_t op_cap_show(struct device *dev,
1479
struct device_attribute *attr, char *buf)
1480
{
1481
struct idxd_device *idxd = confdev_to_idxd(dev);
1482
1483
return op_cap_show_common(dev, buf, idxd->opcap_bmap);
1484
}
1485
static DEVICE_ATTR_RO(op_cap);
1486
1487
static ssize_t gen_cap_show(struct device *dev,
1488
struct device_attribute *attr, char *buf)
1489
{
1490
struct idxd_device *idxd = confdev_to_idxd(dev);
1491
1492
return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
1493
}
1494
static DEVICE_ATTR_RO(gen_cap);
1495
1496
static ssize_t configurable_show(struct device *dev,
1497
struct device_attribute *attr, char *buf)
1498
{
1499
struct idxd_device *idxd = confdev_to_idxd(dev);
1500
1501
return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
1502
}
1503
static DEVICE_ATTR_RO(configurable);
1504
1505
static ssize_t clients_show(struct device *dev,
1506
struct device_attribute *attr, char *buf)
1507
{
1508
struct idxd_device *idxd = confdev_to_idxd(dev);
1509
int count = 0, i;
1510
1511
spin_lock(&idxd->dev_lock);
1512
for (i = 0; i < idxd->max_wqs; i++) {
1513
struct idxd_wq *wq = idxd->wqs[i];
1514
1515
count += wq->client_count;
1516
}
1517
spin_unlock(&idxd->dev_lock);
1518
1519
return sysfs_emit(buf, "%d\n", count);
1520
}
1521
static DEVICE_ATTR_RO(clients);
1522
1523
static ssize_t pasid_enabled_show(struct device *dev,
1524
struct device_attribute *attr, char *buf)
1525
{
1526
struct idxd_device *idxd = confdev_to_idxd(dev);
1527
1528
return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
1529
}
1530
static DEVICE_ATTR_RO(pasid_enabled);
1531
1532
static ssize_t state_show(struct device *dev,
1533
struct device_attribute *attr, char *buf)
1534
{
1535
struct idxd_device *idxd = confdev_to_idxd(dev);
1536
1537
switch (idxd->state) {
1538
case IDXD_DEV_DISABLED:
1539
return sysfs_emit(buf, "disabled\n");
1540
case IDXD_DEV_ENABLED:
1541
return sysfs_emit(buf, "enabled\n");
1542
case IDXD_DEV_HALTED:
1543
return sysfs_emit(buf, "halted\n");
1544
}
1545
1546
return sysfs_emit(buf, "unknown\n");
1547
}
1548
static DEVICE_ATTR_RO(state);
1549
1550
static ssize_t errors_show(struct device *dev,
1551
struct device_attribute *attr, char *buf)
1552
{
1553
struct idxd_device *idxd = confdev_to_idxd(dev);
1554
DECLARE_BITMAP(swerr_bmap, 256);
1555
1556
bitmap_zero(swerr_bmap, 256);
1557
spin_lock(&idxd->dev_lock);
1558
multi_u64_to_bmap(swerr_bmap, &idxd->sw_err.bits[0], 4);
1559
spin_unlock(&idxd->dev_lock);
1560
return sysfs_emit(buf, "%*pb\n", 256, swerr_bmap);
1561
}
1562
static DEVICE_ATTR_RO(errors);
1563
1564
static ssize_t max_read_buffers_show(struct device *dev,
1565
struct device_attribute *attr, char *buf)
1566
{
1567
struct idxd_device *idxd = confdev_to_idxd(dev);
1568
1569
return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
1570
}
1571
1572
static ssize_t max_tokens_show(struct device *dev,
1573
struct device_attribute *attr, char *buf)
1574
{
1575
dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
1576
return max_read_buffers_show(dev, attr, buf);
1577
}
1578
1579
static DEVICE_ATTR_RO(max_tokens); /* deprecated */
1580
static DEVICE_ATTR_RO(max_read_buffers);
1581
1582
static ssize_t read_buffer_limit_show(struct device *dev,
1583
struct device_attribute *attr, char *buf)
1584
{
1585
struct idxd_device *idxd = confdev_to_idxd(dev);
1586
1587
return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
1588
}
1589
1590
static ssize_t token_limit_show(struct device *dev,
1591
struct device_attribute *attr, char *buf)
1592
{
1593
dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
1594
return read_buffer_limit_show(dev, attr, buf);
1595
}
1596
1597
static ssize_t read_buffer_limit_store(struct device *dev,
1598
struct device_attribute *attr,
1599
const char *buf, size_t count)
1600
{
1601
struct idxd_device *idxd = confdev_to_idxd(dev);
1602
unsigned long val;
1603
int rc;
1604
1605
rc = kstrtoul(buf, 10, &val);
1606
if (rc < 0)
1607
return -EINVAL;
1608
1609
if (idxd->state == IDXD_DEV_ENABLED)
1610
return -EPERM;
1611
1612
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1613
return -EPERM;
1614
1615
if (!idxd->hw.group_cap.rdbuf_limit)
1616
return -EPERM;
1617
1618
if (val > idxd->hw.group_cap.total_rdbufs)
1619
return -EINVAL;
1620
1621
idxd->rdbuf_limit = val;
1622
return count;
1623
}
1624
1625
static ssize_t token_limit_store(struct device *dev,
1626
struct device_attribute *attr,
1627
const char *buf, size_t count)
1628
{
1629
dev_warn_once(dev, "attribute deprecated, see read_buffer_limit\n");
1630
return read_buffer_limit_store(dev, attr, buf, count);
1631
}
1632
1633
static DEVICE_ATTR_RW(token_limit); /* deprecated */
1634
static DEVICE_ATTR_RW(read_buffer_limit);
1635
1636
static ssize_t cdev_major_show(struct device *dev,
1637
struct device_attribute *attr, char *buf)
1638
{
1639
struct idxd_device *idxd = confdev_to_idxd(dev);
1640
1641
return sysfs_emit(buf, "%u\n", idxd->major);
1642
}
1643
static DEVICE_ATTR_RO(cdev_major);
1644
1645
static ssize_t cmd_status_show(struct device *dev,
1646
struct device_attribute *attr, char *buf)
1647
{
1648
struct idxd_device *idxd = confdev_to_idxd(dev);
1649
1650
return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
1651
}
1652
1653
static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
1654
const char *buf, size_t count)
1655
{
1656
struct idxd_device *idxd = confdev_to_idxd(dev);
1657
1658
idxd->cmd_status = 0;
1659
return count;
1660
}
1661
static DEVICE_ATTR_RW(cmd_status);
1662
1663
static ssize_t iaa_cap_show(struct device *dev,
1664
struct device_attribute *attr, char *buf)
1665
{
1666
struct idxd_device *idxd = confdev_to_idxd(dev);
1667
1668
if (idxd->hw.version < DEVICE_VERSION_2)
1669
return -EOPNOTSUPP;
1670
1671
return sysfs_emit(buf, "%#llx\n", idxd->hw.iaa_cap.bits);
1672
}
1673
static DEVICE_ATTR_RO(iaa_cap);
1674
1675
static ssize_t event_log_size_show(struct device *dev,
1676
struct device_attribute *attr, char *buf)
1677
{
1678
struct idxd_device *idxd = confdev_to_idxd(dev);
1679
1680
if (!idxd->evl)
1681
return -EOPNOTSUPP;
1682
1683
return sysfs_emit(buf, "%u\n", idxd->evl->size);
1684
}
1685
1686
static ssize_t event_log_size_store(struct device *dev,
1687
struct device_attribute *attr,
1688
const char *buf, size_t count)
1689
{
1690
struct idxd_device *idxd = confdev_to_idxd(dev);
1691
unsigned long val;
1692
int rc;
1693
1694
if (!idxd->evl)
1695
return -EOPNOTSUPP;
1696
1697
rc = kstrtoul(buf, 10, &val);
1698
if (rc < 0)
1699
return -EINVAL;
1700
1701
if (idxd->state == IDXD_DEV_ENABLED)
1702
return -EPERM;
1703
1704
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1705
return -EPERM;
1706
1707
if (val < IDXD_EVL_SIZE_MIN || val > IDXD_EVL_SIZE_MAX ||
1708
(val * evl_ent_size(idxd) > ULONG_MAX - idxd->evl->dma))
1709
return -EINVAL;
1710
1711
idxd->evl->size = val;
1712
return count;
1713
}
1714
static DEVICE_ATTR_RW(event_log_size);
1715
1716
static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
1717
struct idxd_device *idxd)
1718
{
1719
/* Intel IAA does not support batch processing, make it invisible */
1720
return attr == &dev_attr_max_batch_size.attr &&
1721
idxd->data->type == IDXD_TYPE_IAX;
1722
}
1723
1724
static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
1725
struct idxd_device *idxd)
1726
{
1727
/*
1728
* Intel IAA does not support Read Buffer allocation control,
1729
* make these attributes invisible.
1730
*/
1731
return (attr == &dev_attr_max_tokens.attr ||
1732
attr == &dev_attr_max_read_buffers.attr ||
1733
attr == &dev_attr_token_limit.attr ||
1734
attr == &dev_attr_read_buffer_limit.attr) &&
1735
idxd->data->type == IDXD_TYPE_IAX;
1736
}
1737
1738
static bool idxd_device_attr_iaa_cap_invisible(struct attribute *attr,
1739
struct idxd_device *idxd)
1740
{
1741
return attr == &dev_attr_iaa_cap.attr &&
1742
(idxd->data->type != IDXD_TYPE_IAX ||
1743
idxd->hw.version < DEVICE_VERSION_2);
1744
}
1745
1746
static bool idxd_device_attr_event_log_size_invisible(struct attribute *attr,
1747
struct idxd_device *idxd)
1748
{
1749
return (attr == &dev_attr_event_log_size.attr &&
1750
!idxd->hw.gen_cap.evl_support);
1751
}
1752
1753
static umode_t idxd_device_attr_visible(struct kobject *kobj,
1754
struct attribute *attr, int n)
1755
{
1756
struct device *dev = container_of(kobj, struct device, kobj);
1757
struct idxd_device *idxd = confdev_to_idxd(dev);
1758
1759
if (idxd_device_attr_max_batch_size_invisible(attr, idxd))
1760
return 0;
1761
1762
if (idxd_device_attr_read_buffers_invisible(attr, idxd))
1763
return 0;
1764
1765
if (idxd_device_attr_iaa_cap_invisible(attr, idxd))
1766
return 0;
1767
1768
if (idxd_device_attr_event_log_size_invisible(attr, idxd))
1769
return 0;
1770
1771
return attr->mode;
1772
}
1773
1774
static struct attribute *idxd_device_attributes[] = {
1775
&dev_attr_version.attr,
1776
&dev_attr_max_groups.attr,
1777
&dev_attr_max_work_queues.attr,
1778
&dev_attr_max_work_queues_size.attr,
1779
&dev_attr_max_engines.attr,
1780
&dev_attr_numa_node.attr,
1781
&dev_attr_max_batch_size.attr,
1782
&dev_attr_max_transfer_size.attr,
1783
&dev_attr_op_cap.attr,
1784
&dev_attr_gen_cap.attr,
1785
&dev_attr_configurable.attr,
1786
&dev_attr_clients.attr,
1787
&dev_attr_pasid_enabled.attr,
1788
&dev_attr_state.attr,
1789
&dev_attr_errors.attr,
1790
&dev_attr_max_tokens.attr,
1791
&dev_attr_max_read_buffers.attr,
1792
&dev_attr_token_limit.attr,
1793
&dev_attr_read_buffer_limit.attr,
1794
&dev_attr_cdev_major.attr,
1795
&dev_attr_cmd_status.attr,
1796
&dev_attr_iaa_cap.attr,
1797
&dev_attr_event_log_size.attr,
1798
NULL,
1799
};
1800
1801
static const struct attribute_group idxd_device_attribute_group = {
1802
.attrs = idxd_device_attributes,
1803
.is_visible = idxd_device_attr_visible,
1804
};
1805
1806
static const struct attribute_group *idxd_attribute_groups[] = {
1807
&idxd_device_attribute_group,
1808
NULL,
1809
};
1810
1811
static void idxd_conf_device_release(struct device *dev)
1812
{
1813
struct idxd_device *idxd = confdev_to_idxd(dev);
1814
1815
kfree(idxd->groups);
1816
bitmap_free(idxd->wq_enable_map);
1817
kfree(idxd->wqs);
1818
kfree(idxd->engines);
1819
kfree(idxd->evl);
1820
kmem_cache_destroy(idxd->evl_cache);
1821
ida_free(&idxd_ida, idxd->id);
1822
bitmap_free(idxd->opcap_bmap);
1823
kfree(idxd);
1824
}
1825
1826
const struct device_type dsa_device_type = {
1827
.name = "dsa",
1828
.release = idxd_conf_device_release,
1829
.groups = idxd_attribute_groups,
1830
};
1831
1832
const struct device_type iax_device_type = {
1833
.name = "iax",
1834
.release = idxd_conf_device_release,
1835
.groups = idxd_attribute_groups,
1836
};
1837
1838
static int idxd_register_engine_devices(struct idxd_device *idxd)
1839
{
1840
struct idxd_engine *engine;
1841
int i, j, rc;
1842
1843
for (i = 0; i < idxd->max_engines; i++) {
1844
engine = idxd->engines[i];
1845
rc = device_add(engine_confdev(engine));
1846
if (rc < 0)
1847
goto cleanup;
1848
}
1849
1850
return 0;
1851
1852
cleanup:
1853
j = i - 1;
1854
for (; i < idxd->max_engines; i++) {
1855
engine = idxd->engines[i];
1856
put_device(engine_confdev(engine));
1857
}
1858
1859
while (j--) {
1860
engine = idxd->engines[j];
1861
device_unregister(engine_confdev(engine));
1862
}
1863
return rc;
1864
}
1865
1866
static int idxd_register_group_devices(struct idxd_device *idxd)
1867
{
1868
struct idxd_group *group;
1869
int i, j, rc;
1870
1871
for (i = 0; i < idxd->max_groups; i++) {
1872
group = idxd->groups[i];
1873
rc = device_add(group_confdev(group));
1874
if (rc < 0)
1875
goto cleanup;
1876
}
1877
1878
return 0;
1879
1880
cleanup:
1881
j = i - 1;
1882
for (; i < idxd->max_groups; i++) {
1883
group = idxd->groups[i];
1884
put_device(group_confdev(group));
1885
}
1886
1887
while (j--) {
1888
group = idxd->groups[j];
1889
device_unregister(group_confdev(group));
1890
}
1891
return rc;
1892
}
1893
1894
static int idxd_register_wq_devices(struct idxd_device *idxd)
1895
{
1896
struct idxd_wq *wq;
1897
int i, rc, j;
1898
1899
for (i = 0; i < idxd->max_wqs; i++) {
1900
wq = idxd->wqs[i];
1901
rc = device_add(wq_confdev(wq));
1902
if (rc < 0)
1903
goto cleanup;
1904
}
1905
1906
return 0;
1907
1908
cleanup:
1909
j = i - 1;
1910
for (; i < idxd->max_wqs; i++) {
1911
wq = idxd->wqs[i];
1912
put_device(wq_confdev(wq));
1913
}
1914
1915
while (j--) {
1916
wq = idxd->wqs[j];
1917
device_unregister(wq_confdev(wq));
1918
}
1919
return rc;
1920
}
1921
1922
int idxd_register_devices(struct idxd_device *idxd)
1923
{
1924
struct device *dev = &idxd->pdev->dev;
1925
int rc, i;
1926
1927
rc = device_add(idxd_confdev(idxd));
1928
if (rc < 0)
1929
return rc;
1930
1931
rc = idxd_register_wq_devices(idxd);
1932
if (rc < 0) {
1933
dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
1934
goto err_wq;
1935
}
1936
1937
rc = idxd_register_engine_devices(idxd);
1938
if (rc < 0) {
1939
dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
1940
goto err_engine;
1941
}
1942
1943
rc = idxd_register_group_devices(idxd);
1944
if (rc < 0) {
1945
dev_dbg(dev, "Group device registering failed: %d\n", rc);
1946
goto err_group;
1947
}
1948
1949
return 0;
1950
1951
err_group:
1952
for (i = 0; i < idxd->max_engines; i++)
1953
device_unregister(engine_confdev(idxd->engines[i]));
1954
err_engine:
1955
for (i = 0; i < idxd->max_wqs; i++)
1956
device_unregister(wq_confdev(idxd->wqs[i]));
1957
err_wq:
1958
device_del(idxd_confdev(idxd));
1959
return rc;
1960
}
1961
1962
void idxd_unregister_devices(struct idxd_device *idxd)
1963
{
1964
int i;
1965
1966
for (i = 0; i < idxd->max_wqs; i++) {
1967
struct idxd_wq *wq = idxd->wqs[i];
1968
1969
device_unregister(wq_confdev(wq));
1970
}
1971
1972
for (i = 0; i < idxd->max_engines; i++) {
1973
struct idxd_engine *engine = idxd->engines[i];
1974
1975
device_unregister(engine_confdev(engine));
1976
}
1977
1978
for (i = 0; i < idxd->max_groups; i++) {
1979
struct idxd_group *group = idxd->groups[i];
1980
1981
device_unregister(group_confdev(group));
1982
}
1983
}
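
/*
 * Resulting sysfs layout (sketch, names follow the handlers above): each idxd
 * device exposes a parent config device (dsa<N> or iax<N>) and one child
 * config device per workqueue (wq<N>.<M>), engine (engine<N>.<M>) and group
 * (group<N>.<M>), all registered on the dsa bus, e.g.
 * /sys/bus/dsa/devices/wq0.1.
 */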